<FILEB>
<CHANGES>
viewscope = env.global_scope().context.cython_scope.viewscope
<CHANGEE>
<FILEE>
<FILEB>
elif packing == 'follow':
if has_strided:
raise CompileError(pos, "A memoryview cannot have both follow and strided axis specifiers.")
if not (is_c_contig or is_f_contig):
raise CompileError(pos, "Invalid use of the follow specifier.")
def _get_resolved_spec(env, spec):
# spec must be a NameNode or an AttributeNode
if isinstance(spec, NameNode):
return _resolve_NameNode(env, spec)
elif isinstance(spec, AttributeNode):
return _resolve_AttributeNode(env, spec)
else:
raise CompileError(spec.pos, INVALID_ERR)
def _resolve_NameNode(env, node):
try:
resolved_name = env.lookup(node.name).name
except AttributeError:
raise CompileError(node.pos, INVALID_ERR)
<CHANGES>
viewscope = env.context.cython_scope.viewscope
<CHANGEE>
return viewscope.lookup(resolved_name)
def _resolve_AttributeNode(env, node):
path = []
while isinstance(node, AttributeNode):
path.insert(0, node.attribute)
node = node.obj
if isinstance(node, NameNode):
path.insert(0, node.name)
else:
raise CompileError(node.pos, EXPR_ERR)
modnames = path[:-1]
# must be at least 1 module name, o/w not an AttributeNode.
<FILEE>
<SCANS>env, lu_name):
import CythonScope
cythonscope = env.global_scope().context.cython_scope
viewscope = cythonscope.viewscope
entry = viewscope.lookup_here(lu_name)
entry.used = 1
return entry
def use_cython_util_code(env, lu_name):
import CythonScope
cythonscope = env.global_scope().context.cython_scope
entry = cythonscope.lookup_here(lu_name)
entry.used = 1
return entry
def use_memview_util_code(env):
import CythonScope
return use_cython_view_util_code(env, CythonScope.memview_name)
def use_memview_cwrap(env):
import CythonScope
return use_cython_view_util_code(env, CythonScope.memview_cwrap_name)
def use_cython_array(env):
return use_cython_util_code(env, 'array')
def src_conforms_to_dst(src, dst):
'''returns True if src conforms to dst, False otherwise.

If conformable, the types are the same, the ndims are equal, and each axis spec
is conformable.

Any packing/access spec is conformable to itself.
'direct' and 'ptr' are conformable to 'full'.
'contig' and 'follow' are conformable to 'strided'.
Any other combo is not conformable.'''
if src.dtype != dst.dtype:
return False
if len(src.axes) != len(dst.axes):
return False
for src_spec, dst_spec in zip(src.axes, dst.axes):
src_access, src_packing = src_spec
dst_access, dst_packing = dst_spec
if src_access != dst_access and dst_access != 'full':
return False
if src_packing != dst_packing and dst_packing != 'strided':
return False
return True
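# A minimal usage sketch (not from the original module; the namedtuple below is a
# hypothetical stand-in for a memoryview type exposing `dtype` and `axes`):
from collections import namedtuple
_MV = namedtuple('_MV', ['dtype', 'axes'])
_src = _MV('int', [('direct', 'contig'), ('direct', 'follow')])
_dst = _MV('int', [('full', 'strided'), ('direct', 'follow')])
assert src_conforms_to_dst(_src, _dst)        # 'direct' -> 'full', 'contig' -> 'strided'
assert not src_conforms_to_dst(_dst, _src)    # 'full' does not conform to 'direct'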
def get_copy_contents_name(from_mvs, to_mvs):
dtype = from_mvs.dtype
assert dtype == to_mvs.dtype
return ('__Pyx_BufferCopyContents_%s_%s_%s' %
(axes_to_str(from_mvs.axes),
axes_to_str(to_mvs.axes),
mangle_dtype_name(dtype)))
copy_template = '''static __Pyx_memviewslice %(copy_name)s(const __Pyx_memviewslice from_mvs) {
    int i;
    __Pyx_memviewslice new_mvs = {0, 0};
    struct __pyx_obj_memoryview *from_memview = from_mvs.memview;
    Py_buffer *buf = &from_memview->view;
    PyObject *shape_tuple = 0;
<FILEB>
<CHANGES>
self.assertContains(response, b"Currently")
<CHANGEE>
<FILEE>
<FILEB>
self.client.logout()
def test_inline_file_upload_edit_validation_error_post(self):
"""Test that inline file uploads correctly display prior data (#10002)."""
post_data = {
"name": "Test Gallery",
"pictures-TOTAL_FORMS": "2",
"pictures-INITIAL_FORMS": "1",
"pictures-MAX_NUM_FORMS": "0",
"pictures-0-id": six.text_type(self.picture.id),
"pictures-0-gallery": six.text_type(self.gallery.id),
"pictures-0-name": "Test Picture",
"pictures-0-image": "",
"pictures-1-id": "",
"pictures-1-gallery": str(self.gallery.id),
"pictures-1-name": "Test Picture 2",
"pictures-1-image": "",
}
response = self.client.post('/test_admin/%s/admin_views/gallery/%d/' % (self.urlbit, self.gallery.id), post_data)
<CHANGES>
self.assertTrue(response._container[0].find("Currently:") > -1)
<CHANGEE>
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class AdminInlineTests(TestCase):
urls = "admin_views.urls"
fixtures = ['admin-views-users.xml']
def setUp(self):
self.post_data = {
"name": "Test Name",
"widget_set-TOTAL_FORMS": "3",
"widget_set-INITIAL_FORMS": "0",
"widget_set-MAX_NUM_FORMS": "0",
"widget_set-0-id": "",
"widget_set-0-owner": "1",
<FILEE>
<SCANS> self.client.post('/test_admin/admin/secure-view/', self.deleteuser_login)
self.assertRedirects(login, '/test_admin/admin/secure-view/')
self.assertFalse(login.context)
self.client.get('/test_admin/admin/logout/')
# Regular User should not be able to login.
response = self.client.get('/test_admin/admin/secure-view/')
self.assertEqual(response.status_code, 200)
login = self.client.post('/test_admin/admin/secure-view/', self.joepublic_login)
self.assertEqual(login.status_code, 200)
# Login.context is a list of context dicts we just need to check the first one.
self.assertContains(login, ERROR_MESSAGE)
# 8509 - if a normal user is already logged in, it is possible
# to change user into the superuser without error
login = self.client.login(username='joepublic', password='secret')
# Check and make sure that if user expires, data still persists
self.client.get('/test_admin/admin/secure-view/')
self.client.post('/test_admin/admin/secure-view/', self.super_login)
# make sure the view removes test cookie
self.assertEqual(self.client.session.test_cookie_worked(), False)
def test_shortcut_view_only_available_to_staff(self):
"""Only admin users should be able to use the admin shortcut view."""
model_ctype = ContentType.objects.get_for_model(ModelWithStringPrimaryKey)
obj = ModelWithStringPrimaryKey.objects.create(string_pk='foo')
shortcut_url = "/test_admin/admin/r/%s/%s/" % (model_ctype.pk, obj.pk)
# Not logged in: we should see the login page.
response = self.client.get(shortcut_url, follow=False)
self.assertTemplateUsed(response, 'admin/login.html')
# Logged in? Redirect.
self.client.login(username='super', password='secret')
response = self.client.get(shortcut_url, follow=False)
# Can't use self.assertRedirects() because User.get_absolute_url() is silly.
self.assertEqual(response.status_code, 302)
self.assertEqual(response.url, 'http://example.com/dummy/foo/')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
<FILEB>
<CHANGES>
return [get_base_url() + data_location + filename +'s' for filename in result.get('filename')[index_number_start:index_number_end]]
<CHANGEE>
<FILEE>
<FILEB>
"""Returns"""
"""-------"""
"""out : list"""
"""Returns the filenames of the observation summary file"""
"""Examples"""
"""--------"""
""">>> import sunpy.instr.rhessi as rhessi"""
""">>> rhessi.get_obssum_filename(('2011/04/04', '2011/04/05')) # doctest: +SKIP"""
""".. note::"""
"""This API is currently limited to providing data from whole days only."""
# need to download and inspect the dbase file to determine the filename
# for the observing summary data
f = get_obssumm_dbase_file(time_range)
data_location = 'metadata/catalog/'
result = parse_obssumm_dbase_file(f[0])
_time_range = TimeRange(time_range)
index_number_start = _time_range.start.day - 1
index_number_end = _time_range.end.day - 1
<CHANGES>
return [data_servers[0] + data_location + filename + 's' for filename in result.get('filename')[index_number_start:index_number_end]]
<CHANGEE>
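# A worked sketch of the slicing above (assumed dates): for
# time_range = ('2011/04/04', '2011/04/05'), start.day == 4 and end.day == 5, so
# index_number_start, index_number_end == 3, 4 and result.get('filename')[3:4]
# yields a single whole-day summary file, matching the "whole days only" note.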
def get_obssumm_file(time_range):
"""Download the RHESSI observing summary data from one of the RHESSI"""
"""servers."""
"""Parameters"""
"""----------"""
"""time_range : `str`, `sunpy.time.TimeRange`"""
"""A TimeRange or time range compatible string"""
"""Returns"""
"""-------"""
"""out : tuple"""
"""Return a tuple (filename, headers) where filename is the local file"""
"""name under which the object can be found, and headers is"""
<FILEE>
<SCANS>
""">>> rhessi.get_obssumm_file(('2011/04/04', '2011/04/05')) # doctest: +SKIP"""
""".. note::"""
"""This API is currently limited to providing data from whole days only."""
time_range = TimeRange(time_range)
data_location = 'metadata/catalog/'
url_root = get_base_url() + data_location
url = url_root + get_obssum_filename(time_range)
print('Downloading file: ' + url)
f = urllib.request.urlretrieve(url)
return f
def parse_obssumm_file(filename):
"""Parse a RHESSI observation summary file."""
"""Parameters"""
"""----------"""
"""filename : str"""
"""The filename of a RHESSI fits file."""
"""Returns"""
"""-------"""
"""out : `dict`"""
"""Returns a dictionary."""
"""Examples"""
"""--------"""
""">>> import sunpy.instr.rhessi as rhessi"""
""">>> f = rhessi.get_obssumm_file(('2011/04/04', '2011/04/05')) # doctest: +SKIP"""
""">>> data = rhessi.parse_obssumm_file(f[0]) # doctest: +SKIP"""
afits = fits.open(filename)
header = afits[0].header
reference_time_ut = parse_time(afits[5].data.field('UT_REF')[0])
time_interval_sec = afits[5].data.field('TIME_INTV')[0]
# label_unit = fits[5].data.field('DIM1_UNIT')[0]
# labels = fits[5].data.field('DIM1_IDS')
labels = ['3 - 6 keV', '6 - 12 keV', '12 - 25 keV', '25 - 50 keV',
'50 - 100 keV', '100 - 300 keV', '300 - 800 keV', '800 - 7000 keV',
'7000 - 20000 keV']
# The data stored in the FITS file are "compressed" countrates stored as
# one byte
compressed_countrate = np.array(afits[6].data.field('countrate'))
countrate = uncompress_countrate(compressed_coun
<FILEB>
<CHANGES>
if currency.company_id.id!= company_id:
<CHANGEE>
<FILEE>
<FILEB>
}
}
if type in ('in_invoice', 'in_refund'):
result['value']['partner_bank'] = bank_id
if payment_term != partner_payment_term:
if partner_payment_term:
to_update = self.onchange_payment_term_date_invoice(
cr,uid,ids,partner_payment_term,date_invoice)
result['value'].update(to_update['value'])
else:
result['value']['date_due'] = False
if partner_bank_id != bank_id:
to_update = self.onchange_partner_bank(cr, uid, ids, bank_id)
result['value'].update(to_update['value'])
return result
def onchange_currency_id(self, cr, uid, ids, curr_id, company_id):
if curr_id:
currency = self.pool.get('res.currency').browse(cr, uid, curr_id)
<CHANGES>
if currency.company_id != company_id:
<CHANGEE>
raise osv.except_osv(_('Configuration Error!'),
_('Cannot select a currency that is not related to the current company.\nPlease select accordingly!'))
return {}
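# A minimal sketch of the pitfall in the comparison above (hypothetical ids):
# browse() returns a record object, so `currency.company_id` is itself a
# res.company browse record, while the `company_id` argument is a plain integer.
#     currency = self.pool.get('res.currency').browse(cr, uid, curr_id)
#     currency.company_id       # res.company browse record
#     currency.company_id.id    # integer id -- the value comparable to company_id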
def onchange_payment_term_date_invoice(self, cr, uid, ids, payment_term_id, date_invoice):
if not payment_term_id:
return {}
res={}
pt_obj= self.pool.get('account.payment.term')
if not date_invoice :
date_invoice = time.strftime('%Y-%m-%d')
pterm_list = pt_obj.compute(cr, uid, payment_term_id, value=1, date_ref=date_invoice)
if pterm_list:
<FILEE>
<SCANS> cr.execute('UPDATE account_invoice SET number=%s ' \
'WHERE id=%s', (number, id))
cr.execute('UPDATE account_move SET ref=%s ' \
'WHERE id=%s AND (ref is null OR ref = \'\')',
(ref, move_id))
cr.execute('UPDATE account_move_line SET ref=%s ' \
'WHERE move_id=%s AND (ref is null OR ref = \'\')',
(ref, move_id))
cr.execute('UPDATE account_analytic_line SET ref=%s ' \
'FROM account_move_line ' \
'WHERE account_move_line.move_id = %s ' \
'AND account_analytic_line.move_id = account_move_line.id',
(ref, move_id))
return True
def action_cancel(self, cr, uid, ids, *args):
account_move_obj = self.pool.get('account.move')
invoices = self.read(cr, uid, ids, ['move_id'])
for i in invoices:
if i['move_id']:
account_move_obj.button_cancel(cr, uid, [i['move_id'][0]])
# delete the move this invoice was pointing to
# Note that the corresponding move_lines and move_reconciles
# will be automatically deleted too
account_move_obj.unlink(cr, uid, [i['move_id'][0]])
self.write(cr, uid, ids, {'state':'cancel', 'move_id':False})
self._log_event(cr, uid, ids,-1.0, 'Cancel Invoice')
return True
###################
def list_distinct_taxes(self, cr, uid, ids):
invoices = self.browse(cr, uid, ids)
taxes = {}
for inv in invoices:
for tax in inv.tax_line:
if not tax['name'] in taxes:
taxes[tax['name']] = {'name': tax['name']}
return taxes.values()
def _log_event(self, cr, uid, ids, factor=1.0, name='Open Invoice'):
invs = self.read(cr, uid, ids, ['type','partner_id','amount_untaxed'])
for inv in invs:
part=inv['partner_id'] and inv['partner_id'][0]
pc = pr = 0.0
cr.execute('select sum(quantity*price_unit) from account_invoice_line where invoice_id=%s', (inv['id'],))
total = inv['amount_untaxed']
if inv['type'] in ('in_invoice','in
<FILEB>
<CHANGES>
if Backend.getIdentifiedDbms() in (DBMS.MYSQL, DBMS.HSQL):
<CHANGEE>
<FILEE>
<FILEB>
"""PostgreSQL input: SELECT usename, passwd FROM pg_shadow"""
"""PostgreSQL output: 'HsYIBS'||COALESCE(CAST(usename AS CHARACTER(10000)), ' ')||'KTBfZp'||COALESCE(CAST(passwd AS CHARACTER(10000)), ' ')||'LkhmuP' FROM pg_shadow"""
"""Oracle input: SELECT COLUMN_NAME, DATA_TYPE FROM SYS.ALL_TAB_COLUMNS WHERE TABLE_NAME='USERS'"""
"""Oracle output: 'GdBRAo'||NVL(CAST(COLUMN_NAME AS VARCHAR(4000)), ' ')||'czEHOf'||NVL(CAST(DATA_TYPE AS VARCHAR(4000)), ' ')||'JVlYgS' FROM SYS.ALL_TAB_COLUMNS WHERE TABLE_NAME='USERS'"""
"""Microsoft SQL Server input: SELECT name, master.dbo.fn_varbintohexstr(password) FROM master..sysxlogins"""
"""Microsoft SQL Server output: 'QQMQJO'+ISNULL(CAST(name AS VARCHAR(8000)), ' ')+'kAtlqH'+ISNULL(CAST(master.dbo.fn_varbintohexstr(password) AS VARCHAR(8000)), ' ')+'lpEqoi' FROM master..sysxlogins"""
"""@param query: query string to be processed"""
"""@type query: C{str}"""
"""@return: query string nulled, casted and concatenated"""
"""@rtype: C{str}"""
if unpack:
concatenatedQuery = ""
query = query.replace(", ", ',')
fieldsSelectFrom, fieldsSelect, fieldsNoSelect, fieldsSelectTop, fieldsSelectCase, _, fieldsToCastStr, fieldsExists = self.getFields(query)
castedFields = self.nullCastConcatFields(fieldsToCastStr)
concatenatedQuery = query.replace(fieldsToCastStr, castedFields, 1)
else:
return query
<CHANGES>
if Backend.isDbms(DBMS.MYSQL):
<CHANGEE>
if fieldsExists:
concatenatedQuery = concatenatedQuery.replace("SELECT ", "CONCAT('%s'," % kb.chars.start, 1)
concatenatedQuery += ",'%s')" % kb<SCANS>#!/usr/bin/env python
"""Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)"""
"""See the file 'doc/COPYING' for copying permission"""
import re
from lib.core.common import Backend
from lib.core.common import extractRegexResult
from lib.core.common import getSQLSnippet
from lib.core.common import isDBMSVersionAtLeast
from lib.core.common import isNumber
from lib.core.common import isTechniqueAvailable
from lib.core.common import randomInt
from lib.core.common import randomStr
from lib.core.common import safeSQLIdentificatorNaming
from lib.core.common import singleTimeWarnMessage
from lib.core.common import splitFields
from lib.core.common import unArrayizeValue
from lib.core.common import zeroDepthSearch
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import queries
from lib.core.dicts import DUMP_DATA_PREPROCESS
from lib.core.dicts import FROM_DUMMY_TABLE
from lib.core.enums import DBMS
from lib.core.enums import PAYLOAD
from lib.core.enums import PLACE
from lib.core.enums import POST_HINT
from lib.core.exception import SqlmapNoneDataException
from lib.core.settings import CUSTOM_INJECTION_MARK_CHAR
from lib.core.settings import GENERIC_SQL_COMMENT
from lib.core.settings import PAYLOAD_DELIMITER
from lib.core.settings import REPLACEMENT_MARKER
from lib.core.unescaper import unescaper
class Agent(object):
"""This class defines the SQL agent methods."""
def payloadDirect(self, query):
query = self.cleanupPayload(query)
if query.startswith("AND "):
query = query.replace("AND ", "SELECT ", 1)
elif query.startswith(" UNION ALL "):
query = query.replace(" UNION ALL ", "", 1)
elif query.startswith("; "):
query = query.replace("; ", "", 1)
if Backend.getIdentifiedDbms() in (DBMS.ORACLE,): # non-standard object(
<FILEB>
<CHANGES>
'FLOCKER_ACCEPTANCE_API_CERTIFICATES_PATH': cluster.certificates_path.path,
<CHANGEE>
<FILEE>
<FILEB>
"""added."""
""":param **kwargs: The enviroment variables to add."""
""":return dict: The new environment."""
env = os.environ.copy()
env.update(kwargs)
return env
def remove_known_host(reactor, hostname):
"""Remove all keys belonging to hostname from a known_hosts file."""
""":param reactor: Reactor to use."""
""":param bytes hostname: Remove all keys belonging to this hostname from"""
"""known_hosts."""
return run(reactor, ['ssh-keygen', '-R', hostname])
def get_trial_environment(cluster):
return {
'FLOCKER_ACCEPTANCE_CONTROL_NODE': cluster.control_node.address,
'FLOCKER_ACCEPTANCE_AGENT_NODES':
':'.join(node.address for node in cluster.agent_nodes),
'FLOCKER_ACCEPTANCE_VOLUME_BACKEND': cluster.dataset_backend.name,
<CHANGES>
'FLOCKER_ACCEPTANCE_API_CERTIFICATES_PATH': cluster.ca_directory.path,
<CHANGEE>
}
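# A minimal consumption sketch (assumed wiring, not from this module): the mapping
# above is merged into a copy of os.environ for the process that runs trial, e.g.
#
#     env = os.environ.copy()
#     env.update(get_trial_environment(cluster))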
def run_tests(reactor, cluster, trial_args):
"""Run the acceptance tests."""
""":param Cluster cluster: The cluster to run acceptance tests against."""
""":param list trial_args: Arguments to pass to trial. If not"""
"""provided, defaults to ``['flocker.acceptance']``."""
""":return int: The exit-code of trial."""
if not trial_args:
trial_args = ['--rterrors', 'flocker.acceptance']
def check_result(f):
f.trap(ProcessTerminated)
if f.value.exitCode is not None:
<FILEE>
<SCANS>__(self):
self.nodes = []
self.metadata = self.config.get('metadata', {})
try:
creator = self.metadata['creator']
except KeyError:
raise UsageError("Must specify creator metadata.")
if not creator.isalnum():
raise UsageError(
"Creator must be alphanumeric. Found {!r}".format(creator)
)
self.creator = creator
@inlineCallbacks
def start_cluster(self, reactor):
"""Provision cloud cluster for acceptance tests."""
""":return Cluster: The cluster to connect to for acceptance tests."""
metadata = {
'purpose': 'acceptance-testing',
'distribution': self.distribution,
}
metadata.update(self.metadata)
for index in range(2):
name = "acceptance-test-%s-%d" % (self.creator, index)
try:
print "Creating node %d: %s" % (index, name)
node = self.provisioner.create_node(
name=name,
distribution=self.distribution,
metadata=metadata,
)
except:
print "Error creating node %d: %s" % (index, name)
print "It may have leaked into the cloud."
raise
yield remove_known_host(reactor, node.address)
self.nodes.append(node)
del node
commands = parallel([
node.provision(package_source=self.package_source,
variants=self.variants)
for node in self.nodes
])
if self.dataset_backend == DatasetBackend.zfs:
zfs_commands = parallel([
configure_zfs(node, variants=self.variants)
for node in self.nodes
])
commands = commands.on(success=lambda _: zfs_commands)
certificates_path = FilePath(mkdtemp())
print("Generating certificates in: {}".format(certificates_path.path))
certificates = Certificates.generate(
certificates_path,
self.nodes[0].address,
len(self.nodes))
cluster = Cluster(
control_node=self.nodes[0],
agent_nodes=pvector(self.nodes),
dataset_backend=DatasetBackend.zfs,
certificates_path=certificates_path,
certificates=certificates)
commands = commands.on(success=lambda _: configure_cluster(cluster))
yield perform(make_dispatcher(reactor), commands)
returnValue(cluster)
def stop_cluster(self, reactor):
"""Deprovision the cluster provisioned by ``start_cluster``."""
for node in self.nodes:
try:
print "Destroying %s" % (node.name,)
node.destroy()
except Exception as e:
print "Failed to destroy %s: %s" % (node.name, e)
DISTRIBUTIONS = ('centos-7', 'fedora-20',
<FILEB>
<CHANGES>
return [node for _, node in self._make_nodes()]
<CHANGEE>
<FILEE>
<FILEB>
if not isdefined(values):
values = []
if node.result.outputs:
values.insert(i, node.result.outputs.get()[key])
else:
values.insert(i, None)
if any([val != Undefined for val in values]) and self._result.outputs:
setattr(self._result.outputs, key, values)
if returncode and any([code is not None for code in returncode]):
msg = []
for i, code in enumerate(returncode):
if code is not None:
msg += ['Subnode %d failed'%i]
msg += ['Error:', str(code)]
raise Exception('Subnodes of node: %s failed:\n%s'%(self.name,
'\n'.join(msg)))
def get_subnodes(self):
self._get_inputs()
<CHANGES>
return [node for node in self._make_nodes()]
<CHANGEE>
def _run_interface(self, execute=True, updatehash=False):
"""Run the mapnode interface"""
"""This is primarily intended for serial execution of mapnode. A parallel"""
"""execution requires creation of new nodes that can be spawned"""
old_cwd = os.getcwd()
cwd = self.output_dir()
os.chdir(cwd)
if execute:
nitems = len(filename_to_list(getattr(self.inputs, self.iterfield[0])))
nodenames = ['_' + self.name+str(i) for i in range(nitems)]
# map-reduce formulation
self._collate_results(self._node_runner(self._make_nodes(cwd),
<FILEE>
<SCANS>
if not hasattr(cur_out, attr):
return False
cur_out = getattr(cur_out, attr)
return True
def _get_parameter_node(self, parameter, subtype='in'):
"""Returns the underlying node corresponding to an input or"""
"""output parameter"""
if subtype == 'in':
subobject = self.inputs
else:
subobject = self.outputs
attrlist = parameter.split('.')
cur_out = subobject
for attr in attrlist[:-1]:
cur_out = getattr(cur_out, attr)
return cur_out.traits()[attrlist[-1]].node
def _check_outputs(self, parameter):
return self._has_attr(parameter, subtype='out')
def _check_inputs(self, parameter):
return self._has_attr(parameter, subtype='in')
def _get_inputs(self):
"""Returns the inputs of a workflow"""
"""This function does not return any input ports that are already connected"""
inputdict = TraitedSpec()
for node in self._graph.nodes():
inputdict.add_trait(node.name, traits.Instance(TraitedSpec))
if isinstance(node, Workflow):
setattr(inputdict, node.name, node.inputs)
else:
taken_inputs = []
for _, _, d in self._graph.in_edges_iter(nbunch=node, data=True):
for cd in d['connect']:
taken_inputs.append(cd[1])
unconnectedinputs = TraitedSpec()
for key, trait in node.inputs.items():
if key not in taken_inputs:
unconnectedinputs.add_trait(key, traits.Trait(trait, node=node))
value = getattr(node.inputs, key)
setattr(unconnectedinputs, key, value)
setattr(inputdict, node.name, unconnectedinputs)
getattr(inputdict, node.name).on_trait_change(self._set_input)
return inputdict
def _get_outputs(self):
"""Returns all possible output ports that are not already connected"""
outputdict = TraitedSpec()
for node in self._graph.nodes():
outputdict.add_trait(node.name, traits.Instance(TraitedSpec))
if isinstance(node, Workflow):
setattr(outputdict, node.name, node.outputs)
else:
outputs = TraitedSpec()
for key, _ in node.outputs.items():
outputs.add_trait(key, traits.Any(node=node))
setattr(outputs, key, None)
setattr(outputdict, node.name, outputs)
return outputdict
def _set_input(self, object, name, newvalue):
"""Trait callback function to update a node input"""
object.traits()[name].node.set_input(name, newvalue)
def _set_node_input(self,
<FILEB>
<CHANGES>
self.store, state_by_room[room_id], user_id, fallback_to_members=False
<CHANGEE>
<FILEE>
<FILEB>
if msgformat == "org.matrix.custom.html" and formatted_body:
messagevars["body_text_html"] = safe_markup(formatted_body)
elif body:
messagevars["body_text_html"] = safe_text(body)
return messagevars
def add_image_message_vars(self, messagevars, event):
messagevars["image_url"] = event.content["url"]
return messagevars
@defer.inlineCallbacks
def make_summary_text(self, notifs_by_room, state_by_room,
notif_events, user_id, reason):
if len(notifs_by_room) == 1:
# Only one room has new stuff
room_id = notifs_by_room.keys()[0]
# If the room has some kind of name, use it, but we don't
# want the generated-from-names one here otherwise we'll
# end up with, "new message from Bob in the Bob room"
room_name = yield calculate_room_name(
<CHANGES>
state_by_room[room_id], user_id, fallback_to_members=False
<CHANGEE>
)
my_member_event = state_by_room[room_id][("m.room.member", user_id)]
if my_member_event.content["membership"] == "invite":
inviter_member_event = state_by_room[room_id][
("m.room.member", my_member_event.sender)
]
inviter_name = name_from_member_event(inviter_member_event)
if room_name is None:
defer.returnValue(INVITE_FROM_PERSON % {
"person": inviter_name,
"app": self.app_name
})
<FILEE>
<SCANS> # custom to matrix for IRC-style font coloring
'del', # for markdown
# deliberately no h1/h2 to stop people shouting.
'h3', 'h4', 'h5', 'h6', 'blockquote', 'p', 'a', 'ul', 'ol',
'nl', 'li', 'b', 'i', 'u', 'strong', 'em', 'strike', 'code', 'hr', 'br', 'div',
'table', 'thead', 'caption', 'tbody', 'tr', 'th', 'td', 'pre'
]
ALLOWED_ATTRS = {
# custom ones first:
"font": ["color"], # custom to matrix
"a": ["href", "name", "target"], # remote target: custom to matrix
# We don't currently allow img itself by default, but this
# would make sense if we did
"img": ["src"],
}
# When bleach release a version with this option, we can specify schemes
# ALLOWED_SCHEMES = ["http", "https", "ftp", "mailto"]
class Mailer(object):
def __init__(self, hs, app_name):
self.hs = hs
self.store = self.hs.get_datastore()
self.auth_handler = self.hs.get_auth_handler()
self.state_handler = self.hs.get_state_handler()
loader = jinja2.FileSystemLoader(self.hs.config.email_template_dir)
self.app_name = app_name
logger.info("Created Mailer for app_name %s" % app_name)
env = jinja2.Environment(loader=loader)
env.filters["format_ts"] = format_ts_filter
env.filters["mxc_to_http"] = self.mxc_to_http_filter
self.notif_template_html = env.get_template(
self.hs.config.email_notif_template_html
)
self.notif_template_text = env.get_template(
self.hs.config.email_notif_template_text
)
@defer.inlineCallbacks
def send_notification_mail(self, app_id, user_id, email_address,
push_actions, reason):
try:
from_string = self.hs.config.email_notif_from % {
"app": self.app_name
}
except TypeError:
from_string = self.hs.config.email_notif_from
raw_from = email.utils.parseaddr(from_string)[1]
raw_to = email.utils.parseaddr(email_address)[1
<FILEB>
<CHANGES>
cursect[self.optionxform(optname)] = optval
<CHANGEE>
<FILEE>
<FILEB>
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, `line`)
# an option line?
else:
mo = self.OPTCRE.match(line)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
if vi in ('=', ':') and ';' in optval:
# ';' is a comment delimiter only if it follows
# a spacing character
pos = string.find(optval, ';')
if pos and optval[pos-1] in string.whitespace:
optval = optval[:pos]
optval = string.strip(optval)
# allow empty values
if optval == '""':
optval = ''
<CHANGES>
cursect[optname] = optval
<CHANGEE>
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
if not e:
e = ParsingError(fpname)
e.append(lineno, `line`)
# if any parsing errors occurred, raise an exception
if e:
raise e
<FILEE>
<SCANS> opts.keys()
def has_option(self, section, option):
"""Return whether the given section has the given option."""
try:
opts = self.__sections[section]
except KeyError:
raise NoSectionError(section)
return opts.has_key(option)
def read(self, filenames):
"""Read and parse a filename or a list of filenames."""
"""Files that cannot be opened are silently ignored; this is"""
"""designed so that you can specify a list of potential"""
"""configuration file locations (e.g. current directory, user's"""
"""home directory, systemwide directory), and all existing"""
"""configuration files in the list will be read. A single"""
"""filename may also be given."""
if type(filenames) in [type(''), type(u'')]:
filenames = [filenames]
for filename in filenames:
try:
fp = open(filename)
except IOError:
continue
self.__read(fp, filename)
fp.close()
def readfp(self, fp, filename=None):
"""Like read() but the argument must be a file-like object."""
"""The `fp' argument must have a `readline' method. Optional"""
"""second argument is the `filename', which if not given, is"""
"""taken from fp.name. If fp has no `name' attribute, `<???>' is"""
"""used."""
if filename is None:
try:
filename = fp.name
except AttributeError:
filename = '<???>'
self.__read(fp, filename)
def get(self, section, option, raw=0, vars=None):
"""Get an option value for a given section."""
"""All % interpolations are expanded in the return values, based on the"""
"""defaults passed into the constructor, unless the optional argument"""
"""`raw' is true. Additional substitutions may be provided using the"""
"""`vars' argument, which must be a dictionary whose contents overrides"""
"""any pre-existing defaults."""
"""The section DEFAULT is special."""
try:
sectdict = self.__sections[section].copy()
except KeyError:
if section == DEFAULTSECT:
sectdict = {}
else:
raise NoSectionError(section)
d = self.__defaults.copy()
d.update(sectdict)
# Update with the entry specific variables
if vars:
d.update(vars)
option = self.optionxform(option)
try:
rawval = d[option]
except KeyError:
raise NoOptionError(option, section)
# do the string interpolation
if raw:
return rawval
value = rawval # Make it a pretty variable name
depth = 0
while depth < 10: # Loop through this until it's done
depth = depth + 1
if string.find(value, "%(") >= 0:
try:
value = value % d
except KeyError, key:
raise InterpolationError(key, option, section, rawval)
else:
return value
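# A minimal interpolation sketch (assumes the enclosing class is the module's
# ConfigParser and a StringIO buffer for readfp(), defined above):
#
#     import StringIO
#     cfg = ConfigParser()
#     cfg.readfp(StringIO.StringIO("[paths]\nhome = /opt/app\nlog = %(home)s/log\n"))
#     cfg.get("paths", "log")           # -> "/opt/app/log"  (one interpolation pass)
#     cfg.get("paths", "log", raw=1)    # -> "%(home)s/log"  (interpolation skipped)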
def __get(self, section, conv, option):
return conv(self.get(section, option))
def getint(self, section, option):
return self.__get(section, string.atoi, option)
def getfloat(self, section,
<FILEB>
<CHANGES>
self.assertContains(response, b"Currently")
<CHANGEE>
<FILEE>
<FILEB>
self.client.logout()
def test_inline_file_upload_edit_validation_error_post(self):
"""Test that inline file uploads correctly display prior data (#10002)."""
post_data = {
"name": "Test Gallery",
"pictures-TOTAL_FORMS": "2",
"pictures-INITIAL_FORMS": "1",
"pictures-MAX_NUM_FORMS": "0",
"pictures-0-id": six.text_type(self.picture.id),
"pictures-0-gallery": six.text_type(self.gallery.id),
"pictures-0-name": "Test Picture",
"pictures-0-image": "",
"pictures-1-id": "",
"pictures-1-gallery": str(self.gallery.id),
"pictures-1-name": "Test Picture 2",
"pictures-1-image": "",
}
response = self.client.post('/test_admin/%s/admin_views/gallery/%d/' % (self.urlbit, self.gallery.id), post_data)
<CHANGES>
self.assertTrue(response._container[0].find("Currently:") > -1)
<CHANGEE>
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class AdminInlineTests(TestCase):
urls = "admin_views.urls"
fixtures = ['admin-views-users.xml']
def setUp(self):
self.post_data = {
"name": "Test Name",
"widget_set-TOTAL_FORMS": "3",
"widget_set-INITIAL_FORMS": "0",
"widget_set-MAX_NUM_FORMS": "0",
"widget_set-0-id": "",
"widget_set-0-owner": "1",
<FILEE>
<SCANS>(post, '/test_admin/admin/admin_views/customarticle/')
self.assertEqual(CustomArticle.objects.all().count(), 1)
article_pk = CustomArticle.objects.all()[0].pk
# Test custom delete, change, and object history templates
# Test custom change form template
response = self.client.get('/test_admin/admin/admin_views/customarticle/%d/' % article_pk)
self.assertTemplateUsed(response, 'custom_admin/change_form.html')
response = self.client.get('/test_admin/admin/admin_views/customarticle/%d/delete/' % article_pk)
self.assertTemplateUsed(response, 'custom_admin/delete_confirmation.html')
response = self.client.post('/test_admin/admin/admin_views/customarticle/', data={
'index': 0,
'action': ['delete_selected'],
'_selected_action': ['1'],
})
self.assertTemplateUsed(response, 'custom_admin/delete_selected_confirmation.html')
response = self.client.get('/test_admin/admin/admin_views/customarticle/%d/history/' % article_pk)
self.assertTemplateUsed(response, 'custom_admin/object_history.html')
self.client.get('/test_admin/admin/logout/')
def testDeleteView(self):
"""Delete view should restrict access and actually delete items."""
delete_dict = {'post': 'yes'}
# add user should not be able to delete articles
self.client.get('/test_admin/admin/')
self.client.post('/test_admin/admin/', self.adduser_login)
response = self.client.get('/test_admin/admin/admin_views/article/1/delete/')
self.assertEqual(response.status_code, 403)
post = self.client.post('/test_admin/admin/admin_views/article/1/delete/', delete_dict)
self.assertEqual(post.status_code, 403)
self.assertEqual(Article.objects.all().count(), 3)
self.client.get('/test_admin/admin/logout/')
# Delete user can delete
self.client.get('/test_admin/admin/')
self.client.post('/test_admin/admin/', self.deleteuser_login)
response = self.client.get('/test_admin/admin/admin_views/section/1/delete/')
# test response contains link to related Article
self.assertContains(response, "admin_views/article/1/")
response = self.client.get
<FILEB>
<CHANGES>
parameters['additional_owners'] = ','.join(map(str,additional_owners))
<CHANGEE>
<FILEE>
<FILEB>
"""File-like object to upload."""
"""additional_owners: additional Twitter users that are allowed to use"""
"""The uploaded media. Should be a list of integers. Maximum"""
"""number of additional owners is capped at 100 by Twitter."""
"""media_category:"""
"""Category with which to identify media upload. Only use with Ads"""
"""API & video files."""
"""Returns:"""
"""tuple: media_id (returned from Twitter), file-handler object (i.e., has .read()"""
"""method), filename media file."""
url = '%s/media/upload.json' % self.upload_url
media_fp, filename, file_size, media_type = parse_media_file(media, async_upload=True)
if not all([media_fp, filename, file_size, media_type]):
raise TwitterError({'message': 'Could not process media file'})
parameters = {}
if additional_owners and len(additional_owners) > 100:
raise TwitterError({'message': 'Maximum of 100 additional owners may be specified for a Media object'})
if additional_owners:
<CHANGES>
parameters['additional_owners'] = additional_owners
<CHANGEE>
if media_category:
parameters['media_category'] = media_category
# INIT doesn't read in any data. Its purpose is to prepare Twitter to
# receive the content in APPEND requests.
parameters['command'] = 'INIT'
parameters['media_type'] = media_type
parameters['total_bytes'] = file_size
resp = self._RequestUrl(url, 'POST', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
try:
media_id = data['media_id']
except KeyError:
<FILEE>
<SCANS>'',
media_id_bytes,
boundary,
b'Content-Disposition: form-data; name="segment_index"',
b'',
str(segment_id).encode('utf-8'),
boundary,
'Content-Disposition: form-data; name="media"; filename="{0!r}"'.format(filename).encode('utf8'),
b'Content-Type: application/octet-stream',
b'',
data,
boundary + b'--'
]
body_data = b'\r\n'.join(body)
headers['Content-Length'] = str(len(body_data))
resp = self._RequestChunkedUpload(url=url,
headers=headers,
data=body_data)
# The body of the response should be blank, but the normal decoding
# raises a JSONDecodeError, so we should only do error checking
# if the response is not blank.
if resp.content.decode('utf-8'):
return self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
segment_id += 1
try:
media_fp.close()
except Exception as e:
pass
return True
def _UploadMediaChunkedFinalize(self, media_id):
"""Finalize chunked upload to Twitter."""
"""Args:"""
"""media_id (int):"""
"""ID of the media file for which to finalize the upload."""
"""Returns:"""
"""json: JSON string of data from Twitter."""
url = '%s/media/upload.json' % self.upload_url
parameters = {
'command': 'FINALIZE',
'media_id': media_id
}
resp = self._RequestUrl(url, 'POST', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return data
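# A minimal end-to-end sketch of the chunked flow (INIT -> APPEND -> FINALIZE);
# `api`, the sample file, and the `_UploadMediaChunkedAppend` name standing in for
# the append loop shown earlier are assumptions, not taken verbatim from this excerpt:
#
#     media_id, fp, name = api._UploadMediaChunkedInit(media=open('clip.mp4', 'rb'))
#     api._UploadMediaChunkedAppend(media_id=media_id, media_fp=fp, filename=name)
#     api._UploadMediaChunkedFinalize(media_id)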
def UploadMediaChunked(self,
media,
additional_owners=None,
media_category=None):
"""Upload a media file to Twitter in multiple requests."""
"""Args:"""
"""media:"""
"""File-like object to upload."""
"""additional_owners: additional Twitter users that are allowed to use"""
"""The uploaded media. Should be a list of integers. Maximum"""
"""number of additional owners is capped at 100 by Twitter."""
"""media_category:"""
"""Category with which to identify media upload. Only use with Ads"""
"""API & video files."""
"""Returns:"""
"""media_id:"""
"""ID of the uploaded media returned by the Twitter API. Raises if"""
"""unsuccesful."""
media_id, media_fp, filename = self._UploadMediaChunkedInit(media=media,
additional_owners=additional_owners,
media_category
<FILEB>
<CHANGES>
self.config, self._log
<CHANGEE>
<FILEE>
<FILEB>
'overwrite': False,
'auto': True,
'backend': u'command',
'targetlevel': 89,
})
self.overwrite = self.config['overwrite'].get(bool)
self.automatic = self.config['auto'].get(bool)
backend_name = self.config['backend'].get(unicode)
if backend_name not in self.backends:
raise ui.UserError(
u"Selected ReplayGain backend {0} is not supported. "
u"Please select one of: {1}".format(
backend_name,
u', '.join(self.backends.keys())
)
)
try:
self.backend_instance = self.backends[backend_name](
<CHANGES>
self.config
<CHANGEE>
)
except (ReplayGainError, FatalReplayGainError) as e:
raise ui.UserError(
'replaygain initialization failed: {0}'.format(e)
)
def track_requires_gain(self, item):
return self.overwrite or \
(not item.rg_track_gain or not item.rg_track_peak)
def album_requires_gain(self, album):
# Skip calculating gain only when *all* files don't need
# recalculation. This way, if any file among an album's tracks
# needs recalculation, we still get an accurate album gain
<FILEE>
<SCANS>self, album):
"""Compute ReplayGain values for the requested album and its items."""
""":rtype: :class:`AlbumGain`"""
self._log.debug(u'Analysing album {0.albumartist} - {0.album}', album)
# The first item is taken and opened to get the sample rate to
# initialize the replaygain object. The object is used for all the
# tracks in the album to get the album values.
item = list(album.items())[0]
audiofile = self.open_audio_file(item)
rg = self.init_replaygain(audiofile, item)
track_gains = []
for item in album.items():
audiofile = self.open_audio_file(item)
rg_track_gain, rg_track_peak = rg.title_gain(audiofile.to_pcm())
track_gains.append(
Gain(gain=rg_track_gain, peak=rg_track_peak)
)
self._log.debug(u'ReplayGain for track {0.artist} - {0.title}: '
u'{1:.2f}, {2:.2f}',
item, rg_track_gain, rg_track_peak)
# After getting the values for all tracks, it's possible to get the
# album values.
rg_album_gain, rg_album_peak = rg.album_gain()
self._log.debug(u'ReplayGain for album {0.albumartist} - {0.album}: '
u'{1:.2f}, {2:.2f}',
album, rg_album_gain, rg_album_peak)
return AlbumGain(
Gain(gain=rg_album_gain, peak=rg_album_peak),
track_gains=track_gains
)
# Main plugin logic.
class ReplayGainPlugin(BeetsPlugin):
"""Provides ReplayGain analysis."""
backends = {
"command": CommandBackend,
"gstreamer": GStreamerBackend,
"audiotools": AudioToolsBackend
}
def __init__(self):
super(ReplayGainPlugin, self).__init__()
self._import_stages = [self.imported]
# default backend is 'command' for backward-compatibility.
self.config.add({
# value.
return self.overwrite or \
any([not item.rg_album_gain or not item.rg_album_peak
for item in album.items()])
def store_track_gain(self, item, track_gain):
item.rg_track_gain = track_gain.gain
item.rg_track_peak = track_gain.peak
item.store()
self._log.debug(u'applied track gain {0}, peak {1}',
item.rg_track_gain, item.rg_track_peak)
def store_album_gain(self, album, album_gain):
album.rg_album_gain = album_gain.gain
album.rg_album_peak = album_gain.peak
album.store()
self._log.debug(u'applied album gain {0}, peak {1
<FILEB>
<CHANGES>
self._columns['o2target'].readonly = not (user in user_ids)
<CHANGEE>
<FILEE>
<FILEB>
_table = "nh_clinical_wardboard"
_trend_strings = [('up', 'up'), ('down', 'down'), ('same', 'same'), ('none', 'none'), ('one', 'one')]
_rec_name = 'full_name'
def _get_logo(self, cr, uid, ids, fields_name, arg, context=None):
res = {}
for board in self.browse(cr, uid, ids, context=context):
res[board.id] = board.patient_id.partner_id.company_id.logo
return res
_clinical_risk_selection = [['NoScore', 'No Score Yet'],
['High', 'High Risk'],
['Medium', 'Medium Risk'],
['Low', 'Low Risk'],
['None', 'No Risk']]
_boolean_selection = [('yes', 'Yes'),
('no', 'No')]
def fields_view_get(self, cr, user, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
user_pool = self.pool['res.users']
user_ids = user_pool.search(cr, user, [['groups_id.name', 'in', ['NH Clinical Doctor Group']]], context=context)
<CHANGES>
self._columns['o2target'].readonly = not (user in user_ids)
<CHANGEE>
res = super(nh_clinical_wardboard, self).fields_view_get(cr, user, view_id, view_type, context, toolbar, submenu)
return res
def _get_started_device_session_ids(self, cr, uid, ids, field_name, arg, context=None):
res = {}.fromkeys(ids, False)
sql = """select spell_id, ids """
"""from wb_activity_data """
"""where data_model='nh.clinical.device.session' """
"""and state in ('started') and spell_id in (%s)""" % ", ".join([str(spell_id) for spell_id in ids])
cr.execute(sql)
res.update({r['spell_id']: r['ids'] for r in cr.dictfetchall()})<SCANS>2_id': False, 'location2_id': False}}
return {'value': {'patient2_id': patient_id[0]}}
class wardboard_patient_placement(orm.TransientModel):
_name = "wardboard.patient.placement"
_columns = {
'patient_id': fields.many2one('nh.clinical.patient', 'Patient'),
'ward_location_id': fields.many2one('nh.clinical.location', "Ward"),
'bed_src_location_id': fields.many2one('nh.clinical.location', "Source Bed"),
'bed_dst_location_id': fields.many2one('nh.clinical.location', "Destination Bed")
}
def do_move(self, cr, uid, ids, context=None):
wiz = self.browse(cr, uid, ids[0], context=context)
spell_pool = self.pool['nh.clinical.spell']
move_pool = self.pool['nh.clinical.patient.move']
activity_pool = self.pool['nh.activity']
spell_id = spell_pool.get_by_patient_id(cr, uid, wiz.patient_id.id, context=context)
spell = spell_pool.browse(cr, uid, spell_id, context=context)
# move to location
move_activity_id = move_pool.create_activity(cr, SUPERUSER_ID,
{'parent_id': spell.activity_id.id},
{'patient_id': wiz.patient_id.id,
'location_id': wiz.bed_dst_location_id.id})
activity_pool.complete(cr, uid, move_activity_id)
activity_pool.submit(cr, uid, spell.activity_id.id, {'location_id': wiz.bed_dst_location_id.id})
class wardboard_device_session_start(orm.TransientModel):
_name = "wardboard.device.session.start"
_columns = {
'patient_id': fields.many2one('nh.clinical.patient', 'Patient'),
'device_category_id': fields.many2
<FILEB>
<CHANGES>
method = luigi.EnumParameter(enum=Method, default=Method.shear)
<CHANGEE>
<FILEE>
<FILEB>
'R10m': 700,
'R20m': 350,
'R60m': 120}
return buf[group]
@luigi.Task.event_handler(luigi.Event.FAILURE)
def on_failure(task, exception):
"""Capture any Task Failure here."""
ERROR_LOGGER.error(task=task.get_task_family(),
params=task.to_str_params(),
scene=task.level1,
exception=exception.__str__(),
traceback=traceback.format_exc().splitlines())
class DataStandardisation(luigi.Task):
"""Runs the standardised product workflow."""
level1 = luigi.Parameter()
outdir = luigi.Parameter()
model = luigi.EnumParameter(enum=Model)
vertices = luigi.TupleParameter(default=(5, 5))
<CHANGES>
method = luigi.Parameter(default='shear')
<CHANGEE>
pixel_quality = luigi.BoolParameter()
land_sea_path = luigi.Parameter()
aerosol_fname = luigi.Parameter(significant=False)
brdf_path = luigi.Parameter(significant=False)
brdf_premodis_path = luigi.Parameter(significant=False)
ozone_path = luigi.Parameter(significant=False)
water_vapour_path = luigi.Parameter(significant=False)
dem_path = luigi.Parameter(significant=False)
ecmwf_path = luigi.Parameter(significant=False)
invariant_height_fname = luigi.Parameter(significant=False)
dsm_fname = luigi.Parameter(significant=False)
modtran_exe = luigi.Parameter(significant=False)
<FILEE>
<SCANS>#!/usr/bin/env python
"""Single file workflow for producing NBAR and SBT"""
"""-----------------------------------------------"""
"""This workflow is geared to minimise the number of files on disk"""
"""and provide a kind of direct to archive compute, and retain all"""
"""the necessary intermediate files, which comprise a mixture of"""
"""imagery, tables, and point/scalar datasets."""
"""It also provides a consistant logical structure allowing an easier"""
"""comparison between 'archives' from different production runs, or"""
"""versions of gaip."""
"""This workflow is more suited to full production runs, where testing"""
"""has ensured that the workflow is sound, and more easilt allows"""
"""thousands of scenes to be submitted to the scheduler at once."""
"""Workflow settings can be configured in `luigi.cfg` file."""
# pylint: disable=missing-docstring,no-init,too-many-function-args
# pylint: disable=too-many-locals
# pylint: disable=protected-access
from os.path import join as pjoin, basename
import logging
import traceback
from structlog import wrap_logger
from structlog.processors import JSONRenderer
import luigi
from gaip.constants import Model, Method
from gaip.standardise import card4l
ERROR_LOGGER = wrap_logger(logging.getLogger('gaip-error'),
processors=[JSONRenderer(indent=1, sort_keys=True)])
INTERFACE_LOGGER = logging.getLogger('luigi-interface')
def get_buffer(group):
buf = {'product': 250,
tle_path = luigi.Parameter(significant=False)
rori = luigi.FloatParameter(default=0.52, significant=False)
compression = luigi.Parameter(default='lzf', significant=False)
acq_parser_hint = luigi.Parameter(default=None)
def output(self):
fmt = '{scene}.gaip.h5'
scene = basename(self.level1)
out_fname = fmt.format(scene=scene, model=self.model.name)
return luigi.LocalTarget(pjoin(self.outdir, out_fname))
def run(self):
if self.model == Model.standard or self.model == Model.sbt:
ecmwf_path = self.ecmwf_path
else:
ecmwf_path = None
with self.output().temporary_path() as out_fname:
card4l(self.level1, self.model, self.vertices, self.method,
self.pixel_quality, self.land_sea_path, self.tle_path,
self.aerosol
<FILEB>
<CHANGES>
attrs.LIKE_NUM: cls.like_num,
<CHANGEE>
<FILEE>
<FILEB>
def default_lex_attrs(cls, data_dir=None):
return {
attrs.LOWER: cls.lower,
attrs.NORM: cls.norm,
attrs.SHAPE: cls.shape,
attrs.PREFIX: cls.prefix,
attrs.SUFFIX: cls.suffix,
attrs.CLUSTER: cls.cluster,
attrs.PROB: lambda string: -10.0,
attrs.IS_ALPHA: cls.is_alpha,
attrs.IS_ASCII: cls.is_ascii,
attrs.IS_DIGIT: cls.is_digit,
attrs.IS_LOWER: cls.is_lower,
attrs.IS_PUNCT: cls.is_punct,
attrs.IS_SPACE: cls.is_space,
attrs.IS_TITLE: cls.is_title,
attrs.IS_UPPER: cls.is_upper,
attrs.LIKE_URL: cls.like_url,
<CHANGES>
attrs.LIKE_NUM: cls.like_number,
<CHANGEE>
attrs.LIKE_EMAIL: cls.like_email,
attrs.IS_STOP: cls.is_stop,
attrs.IS_OOV: lambda string: True
}
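# A minimal consumption sketch (assumed input strings): each value in the table
# above is a plain callable, so a lexical attribute can be computed directly:
#
#     lex_attrs = Language.default_lex_attrs()
#     lex_attrs[attrs.LOWER](u"Apple")     # -> u"apple"
#     lex_attrs[attrs.LIKE_NUM](u"1984")   # -> orth.like_number(u"1984")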
@classmethod
def default_dep_labels(cls):
return {0: {'ROOT': True}}
@classmethod
def default_ner_labels(cls):
return {0: {'PER': True, 'LOC': True, 'ORG': True, 'MISC': True}}
@classmethod
def default_data_dir(cls):
<FILEE>
<SCANS>from os import path
from warnings import warn
import io
try:
import ujson as json
except ImportError:
import json
from .tokenizer import Tokenizer
from .vocab import Vocab
from .syntax.parser import Parser
from .tagger import Tagger
from .matcher import Matcher
from .serialize.packer import Packer
from ._ml import Model
from . import attrs
from . import orth
from .syntax.ner import BiluoPushDown
from .syntax.arc_eager import ArcEager
from .attrs import TAG, DEP, ENT_IOB, ENT_TYPE, HEAD
class Language(object):
@staticmethod
def lower(string):
return string.lower()
@staticmethod
def norm(string):
return string
@staticmethod
def shape(string):
return orth.word_shape(string)
@staticmethod
def prefix(string):
return string[0]
@staticmethod
def suffix(string):
return string[-3:]
@staticmethod
def prob(string):
return -30
@staticmethod
def cluster(string):
return 0
@staticmethod
def is_alpha(string):
return orth.is_alpha(string)
@staticmethod
def is_ascii(string):
return orth.is_ascii(string)
@staticmethod
def is_digit(string):
return string.isdigit()
@staticmethod
def is_lower(string):
return orth.is_lower(string)
@staticmethod
def is_punct(string):
return orth.is_punct(string)
@staticmethod
def is_space(string):
return string.isspace()
@staticmethod
def is_title(string):
return orth.is_title(string)
@staticmethod
def is_upper(string):
return orth.is_upper(string)
@staticmethod
def like_url(string):
return orth.like_url(string)
@staticmethod
def like_num(string):
return orth.like_number(string)
@staticmethod
def like_email(string):
return orth.like_email(string)
@staticmethod
def is_stop(string):
return 0
@classmethod
return path.join(path.dirname(__file__), 'data')
@classmethod
def default_vocab(cls, data_dir=None, get_lex_attr=None):
if data_dir is None:
data_dir = cls.default_data_dir()
if get_lex_attr is None:
get_lex_attr = cls.default_lex_attrs(data_dir)
return Vocab.from_dir(
path.join(data_dir, 'vocab'),
get_lex_attr=get_lex_attr)
@classmethod
def default_tokenizer(cls, vocab, data_dir):
if path.exists(data_dir):
return Tokenizer.from_dir(vocab, data_dir)
else:
return Tokenizer(vocab, {}, None, None,
<FILEB>
<CHANGES>
self.assertTrue(admin_menu.find_first(AjaxItem, name=menu_name).item.on_success)
<CHANGEE>
<FILEE>
<FILEB>
page3 = create_page("login_restricted", "nav_playground.html", "en",
published=True, parent=page0, login_required=True)
page4 = create_page("view_restricted", "nav_playground.html", "en",
published=True, parent=page0)
PagePermission.objects.create(page=page4, can_view=True,
user=superuser)
page4.publish('en')
page4 = page4.get_public_object()
self.get_page_request(page4, superuser, '/')
if DJANGO_1_4:
menu_name = _(u'Logout %s') % superuser.username
else:
menu_name = _(u'Logout %s') % superuser.get_username()
with self.login_user_context(superuser):
# Published page, no redirect
response = self.client.get(page1.get_absolute_url('en') + '?edit')
toolbar = response.context['request'].toolbar
admin_menu = toolbar.get_or_create_menu(ADMIN_MENU_IDENTIFIER)
<CHANGES>
self.assertFalse(admin_menu.find_first(AjaxItem, name=menu_name).item.on_success)
<CHANGEE>
# Unpublished page, redirect
response = self.client.get(page2.get_absolute_url('en') + '?edit')
toolbar = response.context['request'].toolbar
admin_menu = toolbar.get_or_create_menu(ADMIN_MENU_IDENTIFIER)
self.assertEquals(admin_menu.find_first(AjaxItem, name=menu_name).item.on_success, '/')
# Published page with login restrictions, redirect
response = self.client.get(page3.get_absolute_url('en') + '?edit')
toolbar = response.context['request'].toolbar
admin_menu = toolbar.get_or_create_menu(ADMIN_MENU_IDENTIFIER)
self.assertEquals(admin_menu.find_first(AjaxItem, name=menu_name).item.on_success, '/')
# Published page with view permissions, redirect
response = self.client.get(page4.get_absolute_url('en') + '?edit')
<FILEE>
<SCANS>toolbar-page", "col_two.html", "en", published=True)
superuser = self.get_superuser()
with self.login_user_context(superuser):
response = self.client.get('/en/?edit')
self.assertEqual(response.status_code, 200)
self.assertContains(response, '<div class="cms_submenu-item cms_submenu-item-title"><span>Generic</span>')
def test_markup_flash_custom_module(self):
superuser = self.get_superuser()
create_page("toolbar-page", "col_two.html", "en", published=True)
with self.login_user_context(superuser):
response = self.client.get('/en/?edit')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'href="LinkPlugin">')
self.assertContains(response,
'<div class="cms_submenu-item cms_submenu-item-title"><span>Different Grouper</span>')
def test_show_toolbar_to_staff(self):
page = create_page("toolbar-page", "nav_playground.html", "en",
published=True)
request = self.get_page_request(page, self.get_staff(), '/')
toolbar = CMSToolbar(request)
self.assertTrue(toolbar.show_toolbar)
def test_show_toolbar_with_edit(self):
page = create_page("toolbar-page", "nav_playground.html", "en",
published=True)
request = self.get_page_request(page, AnonymousUser(), edit=True)
toolbar = CMSToolbar(request)
self.assertTrue(toolbar.show_toolbar)
def test_show_toolbar_without_edit(self):
page = create_page("toolbar-page", "nav_playground.html", "en",
published=True)
request = self.get_page_request(page, AnonymousUser(), edit=False)
toolbar = CMSToolbar(request)
self.assertFalse(toolbar.show_toolbar)
def test_publish_button(self):
page = create_page('test', 'nav_playground.html', 'en', published
<FILEB>
<CHANGES>
'video_id' : video.video_id,
<CHANGEE>
<FILEE>
<FILEB>
try:
video = models.Video.objects.get(youtube_videoid=youtube_videoid)
except models.Video.DoesNotExist:
video = models.Video(video_type=models.VIDEO_TYPE_YOUTUBE,
youtube_videoid=youtube_videoid,
allow_community_edits=True)
video.save()
else:
try:
video = models.Video.objects.get(video_url=video_url)
except models.Video.DoesNotExist:
video = models.Video(video_type=models.VIDEO_TYPE_HTML5,
video_url=video_url,
allow_community_edits=True)
video.save()
video.widget_views_count += 1
video.save()
return_value = {
<CHANGES>
'video_id' : video.id,
<CHANGEE>
'writelock_expiration' : models.WRITELOCK_EXPIRATION
}
# video_tab corresponds to mirosubs.widget.VideoTab.InitialState in
# javascript.
video_tab = 0
if null_widget:
null_captions = None
if request.user.is_authenticated:
null_captions = video.null_captions(request.user)
translation_language_codes = \
video.null_translation_language_codes(request.user)
else:
<FILEE>
<SCANS>_and_open_languages(request, video_id):
return { 'captions': fetch_captions(request, video_id),
'languages': [widget.language_to_map(lang[0], lang[1])
for lang in LANGUAGES]}
def fetch_captions_and_open_languages_null(request, video_id):
return { 'captions': fetch_captions_null(request, video_id),
'languages': [widget.language_to_map(lang[0], lang[1])
for lang in LANGUAGES]}
def save_captions_impl(request, video, version_no, deleted, inserted, updated):
if video.owner is None:
video.owner = request.user
video.save()
last_version = video.captions()
if last_version != None and last_version.version_no >= version_no:
current_version = last_version
else:
current_version = models.VideoCaptionVersion(
video=video, version_no=version_no,
datetime_started=datetime.now(), user=request.user,
is_complete=False)
if last_version != None:
current_version.is_complete = last_version.is_complete
current_version.save()
for caption in list(last_version.videocaption_set.all()):
current_version.videocaption_set.add(
caption.duplicate_for(current_version))
else:
current_version.save()
apply_caption_changes(current_version.videocaption_set, deleted, inserted,
updated, current_version)
current_version.save()
return current_version
def save_captions_null_impl(request, video, version_no, deleted, inserted, updated):
null_captions = video.null_captions(request.user)
if null_captions is None:
null_captions = models.NullVideoCaptions(video=video,
user=request.user)
null_captions.save()
apply_caption_changes(null_captions.videocaption_set, deleted, inserted,
updated, None, null_captions)
null_captions.save()
return null_captions
def apply_caption_changes(caption_set, deleted, inserted, updated,
version=None, null_captions=None):
for d in deleted:
caption_set.remove(caption_set.get(caption_id=d['caption_id']))
for u in updated:
caption = caption_set.get(caption_id=u['caption_id'])
caption.update_from(u)
caption.save()
for i in inserted:
vc = models.VideoCaption(caption_id=i['caption_id'],
caption_text=i['caption_text'],
start_time=i['start_time'],
end_time=i['end_time'])
if version is not None:
vc.version = version
else:
vc.null_captions = null_captions
caption_set.add(
<FILEB>
<CHANGES>
assert isinstance(element, (c.Comment, c.Statement, c.Value))
<CHANGEE>
<FILEE>
<FILEB>
return "<%s (%d, %d, %d)>" % (self.__class__.__name__, len(self.header),
len(self.body), len(self.footer))
@property
def ccode(self):
body = tuple(s.ccode for s in self.body)
return c.Module(self.header + (self._wrapper(body),) + self.footer)
@property
def children(self):
return (self.body,)
class List(Block):
"""Class representing a sequence of one or more statements."""
is_List = True
_wrapper = c.Collection
class Element(Node):
"""A generic node that is worth identifying in an Iteration/Expression tree."""
"""It corresponds to a single :class:`cgen.Statement`."""
is_Element = True
def __init__(self, element):
<CHANGES>
assert isinstance(element, (c.Comment, c.Statement))
<CHANGEE>
self.element = element
def __repr__(self):
return "Element::\n\t%s" % (self.element)
@property
def ccode(self):
return self.element
class Expression(Node):
"""Class encpasulating a single stencil expression"""
is_Expression = True
def __init__(self, stencil):
assert isinstance(stencil, Eq)
self.stencil = stencil
<FILEE>
<SCANS>.index = index or self.dim.name
# Generate loop limits
if isinstance(limits, Iterable):
assert(len(limits) == 3)
self.limits = list(limits)
else:
self.limits = list((0, limits, 1))
# Replace open limits with variables names
if self.limits[1] is None:
# FIXME: Add dimension size as variable bound.
# Needs further generalisation to support loop blocking.
self.limits[1] = IterationBound("%s_size" % self.dim.name, self.dim)
# Record offsets to later adjust loop limits accordingly
self.offsets = [0, 0]
for off in (offsets or {}):
self.offsets[0] = min(self.offsets[0], int(off))
self.offsets[1] = max(self.offsets[1], int(off))
# Track this Iteration's properties
self.properties = as_tuple(properties)
def __repr__(self):
properties = ""
if self.properties:
properties = "WithProperties[%s]::" % ",".join(self.properties)
return "<%sIteration %s; %s>" % (properties, self.index, self.limits)
@property
def ccode(self):
"""Generate C code for the represented stencil loop"""
""":returns: :class:`cgen.For` object representing the loop"""
loop_body = [s.ccode for s in self.nodes]
# Start
if self.offsets[0] != 0:
val = "%s + %s" % (self.limits[0], -self.offsets[0])
try:
val = eval(val)
except (NameError, TypeError):
pass
else:
val = self.limits[0]
loop_init = c.InlineInitializer(c.Value("int", self.index), ccode(val))
# Bound
if self.offsets[1] != 0:
val = "%s - %s" % (self.limits[1], self.offsets[1])
try:
val = eval(val)
except (NameError, TypeError):
pass
else:
val = self.limits[1]
loop_cond = '%s < %s' % (self.index, ccode(val))
# Increment
loop_inc = '%s += %s' % (self.index, self.limits[2])
return c.For(loop_init, loop_cond, loop_inc, c.Block(loop_body))
@property
def is_Open(self):
return self.dim.size is not None
@property
def is_Closed(self):
return not self.is_Open
@property
def children(self):
"""Return the traversable children."""
return (self.nodes,)
class Function(Node):
"""Represent a C function."""
""":param name: The name of the function."""
""":param body: A :class:`Node` or an iterable of :class:`Node` objects representing"""
"""the body of the function."""
""":param retval: The type of the value returned by the function."""
""":param parameters:
<FILEB>
<CHANGES>
elif re.match("^<[\w-]+>$", text):
<CHANGEE>
<FILEE>
<FILEB>
return "token"
elif text[0:1] == ":":
return "selector"
elif text[-2:] == "()" and not (dfn.get('id') or '').startswith("dom-"):
return "function"
else:
return "dfn"
def determineLinkType(el):
# 1. Look at data-link-type
linkType = treeAttr(el, 'data-link-type')
if linkType:
if linkType in config.linkTypes:
return linkType
die("Unknown link type '{0}' on:\n{1}", linkType, outerHTML(el))
# 2. Introspect on the text
text = textContent(el)
if text[0:1] == "@":
return "at-rule"
<CHANGES>
elif text[0:1] == "<" and text[-1:] == ">":
<CHANGEE>
return "type"
elif text[:1] == u"〈" and text[-1:] == u"〉":
return "token"
elif text[0:1] == ":":
return "selector"
elif text[-2:] == "()":
return "functionish"
else:
return "dfn"
def classifyDfns(doc):
dfnTypeToPrefix = {v:k for k,v in config.dfnClassToType.items()}
for el in findAll("dfn"):
<FILEE>
<SCANS> headingLevelOfElement(el) or u"Unnumbered section"
id = el.get('id')
for linkText in linkTexts:
indexEntries.append((linkText, id, headingLevel))
html = u"<ul class='indexlist'>\n"
for text, id, level in sorted(indexEntries, key=lambda x:re.sub(r'[^a-z0-9]', '', x[0].lower())):
html += u"<li>{0}, <a href='#{1}' title='section {2}'>{2}</a>\n".format(escapeHTML(u(text)), u(id), u(level))
html += u"</ul>"
fillWith("index", parseHTML(html))
def addPropertyIndex(doc):
# Extract all the data from the propdef and descdef tables
props = []
for table in findAll('table.propdef'):
prop = {}
names = []
rows = findAll('tr', table)
for row in rows:
# Extract the key, minus the trailing :
key = re.match(u'(.*):', textContent(row[0])).group(1).strip()
# Extract the value from the second cell
if key == "Name":
names = [textContent(x) for x in findAll('dfn', row[1])]
else:
prop[key] = innerHTML(row[1])
for name in names:
tempProp = prop.copy()
tempProp['Name'] = name
props.append(tempProp)
atRules = defaultdict(list)
for table in findAll('table.descdef'):
desc = {}
names = []
atRule = ""
rows = findAll('tr', table)
for row in rows:
# Extract the key, minus the trailing :
key = re.match(u'(.*):', textContent(row[0])).group(1).strip()
# Extract the value from the second cell
if key == "Name":
names = [textContent(x) for x in findAll('dfn', row[1])]
elif key == "For":
atRule = textContent(row[1])
else:
desc[key] = innerHTML(row[1])
for name in names:
tempDesc = desc.copy()
tempDesc['Name'] = name
atRules[atRule].append(tempDesc)
html = u""
if len(props):
# Set up the initial table columns for properties
columns = ["Name", "Value", "Initial", "Applies To", "Inherited", "Percentages", "Media"]
# Add any additional keys used in the document.
allKeys = set()
for prop in props:
allKeys |= set(prop.keys())
columns.extend(sorted(allKeys - set(columns)))
# Create the table
html += u"<table class=proptable><thead><tr>"
for column in columns:
if column == "Inherited":
html += u"<th scope=col>Inh."
elif column == "Percentages":
html += u"<th scope=col>%ages"
else:
html += u"<th scope=col>"+u(column)
<FILEB>
<CHANGES>
prctl.set_name("Tribler" + threading.currentThread().getName())
<CHANGEE>
<FILEE>
<FILEB>
# recheck interval is: interval * 2^(retries)
if interval < self._torrent_check_interval:
continue
self._processed_gui_request_queue.put((torrent_id, infohash, [tracker, ]))
scheduled_torrents += 1
if scheduled_torrents:
self._interrupt_socket.interrupt()
if DEBUG:
print >> sys.stderr, 'TorrentChecking: Selected %d torrents to check on tracker[%s].' % (scheduled_torrents, tracker)
break
elif DEBUG:
print >> sys.stderr, 'TorrentChecking: Selected 0 torrents to check on tracker[%s].' % (tracker)
# ------------------------------------------------------------
# The thread function.
# ------------------------------------------------------------
def run(self):
# TODO: someone please check this? I am not really sure what this is.
if prctlimported:
<CHANGES>
prctl.set_name("Tribler" + currentThread().getName())
<CHANGEE>
# wait for the tracker info cache to be initialized
if DEBUG:
print >> sys.stderr, 'TorrentChecking: Start initializing TrackerInfoCache...'
self._tracker_info_cache.loadCacheFromDb()
if DEBUG:
print >> sys.stderr, 'TorrentChecking: TrackerInfoCache initialized.'
print >> sys.stderr, 'TorrentChecking: initialized.'
last_time_select_torrent = 0
while not self._should_stop:
def process_queue(queue, callback):
requests = []
try:
<FILEE>
<SCANS># ============================================================
# Written by Lipu Fei,
# optimizing the TrackerChecking module written by Niels Zeilemaker.
#
# see LICENSE.txt for license information
#
# TODO: add comments
# ============================================================
import sys
import os
import binascii
import time
import select
import socket
import threading
from threading import Thread, RLock, Event
import Queue
from traceback import print_exc, print_stack
from Tribler.Core.Session import Session
from Tribler.Core.TorrentDef import TorrentDef
from Tribler.Core.Swift.SwiftDef import SwiftDef
from Tribler.Core import NoDispersyRLock
from Tribler.Main.Utility.GuiDBHandler import startWorker
try:
prctlimported = True
import prctl
except ImportError as e:
prctlimported = False
from Tribler.TrackerChecking.TrackerUtility import getUniformedURL
from Tribler.TrackerChecking.TrackerInfoCache import TrackerInfoCache
from Tribler.TrackerChecking.TrackerSession import TrackerSession
from Tribler.TrackerChecking.TrackerSession import\
TRACKER_ACTION_CONNECT, TRACKER_ACTION_ANNOUNCE, TRACKER_ACTION_SCRAPE
from Tribler.TrackerChecking.TrackerSession import\
UDP_TRACKER_RECHECK_INTERVAL, UDP_TRACKER_MAX_RETRIES
from Tribler.TrackerChecking.TrackerSession import\
MAX_TRACKER_MULTI_SCRAPE
from Tribler.Core.Utilities.utilities import parse_magnetlink
from Tribler.Core.CacheDB.sqlitecachedb import forceDBThread, bin2str
from Tribler.Core.CacheDB.sqlitecachedb import str2bin
from Tribler.Core.CacheDB.CacheDBHandler import TorrentDBHandler
from Tribler.Core.DecentralizedTracking.mainlineDHTChecker import mainlineDHTChecker
# some settings
DEBUG = False
DEFAULT_MAX_GUI_REQUESTS = 5000
DEFAULT_TORRENT_SELECTION_INTERVAL = 20 # every 20 seconds, the thread will select torrents to check
DEFAULT_TORRENT_CHECK_INTERVAL = 60 # a torrent will only be checked every 60 seconds
DEFAULT_MAX_TORRENT_CHECK_RETRIES = 8
DEFAULT_TORRENT_CHECK_RETRY_INTERVAL = 30
# ============================================================
# This is the single-threaded tracker checking class.
# ============================================================
class TorrentChecking(Thread):
__single = None
# ------------------------------------------------------------
# Intialization.
# ------------------------------------------------------------
def __init__(self, \
torrent_select_interval=DEFAULT_TORRENT_SELECTION_INTERVAL,
torrent_check_interval=DEFAULT_TORRENT_CHECK_INTERVAL,
max_torrrent_check_retries=DEFAULT_MAX_TORRENT_CHECK_RETRIES,
torrrent_check_retry_interval=DEFAULT_TORRENT_CHECK_RETRY_INTERVAL):
if TorrentChecking.__single:
<FILEB>
<CHANGES>
tx_height, timestamp, pos = item
<CHANGEE>
<FILEE>
<FILEB>
prev_header = self.read_header(height -1)
if not prev_header:
# return False to request previous header
return False
prev_hash = self.hash_header(prev_header)
bits, target = self.get_target(height/2016)
_hash = self.hash_header(header)
try:
assert prev_hash == header.get('prev_block_hash')
assert bits == header.get('bits')
assert eval('0x'+_hash) < target
except:
# this can be caused by a reorg.
print_error("verify header failed"+ repr(header))
# undo verifications
with self.lock:
items = self.verified_tx.items()[:]
for tx_hash, item in items:
<CHANGES>
tx_height, timestamp = item
<CHANGEE>
if tx_height >= height:
print_error("redoing", tx_hash)
with self.lock:
self.verified_tx.pop(tx_hash)
if tx_hash in self.merkle_roots:
self.merkle_roots.pop(tx_hash)
# return False to request previous header.
return False
self.save_header(header)
print_error("verify header:", _hash, height)
return True
def header_to_string(self, res):
<FILEE>
<SCANS>('bits')),4) \
+ int_to_hex(int(res.get('nonce')),4)
return s
def header_from_string(self, s):
hex_to_int = lambda s: eval('0x' + s[::-1].encode('hex'))
h = {}
h['version'] = hex_to_int(s[0:4])
h['prev_block_hash'] = hash_encode(s[4:36])
h['merkle_root'] = hash_encode(s[36:68])
h['timestamp'] = hex_to_int(s[68:72])
h['bits'] = hex_to_int(s[72:76])
h['nonce'] = hex_to_int(s[76:80])
return h
def hash_header(self, header):
return rev_hex(Hash(self.header_to_string(header).decode('hex')).encode('hex'))
def hash_merkle_root(self, merkle_s, target_hash, pos):
h = hash_decode(target_hash)
for i in range(len(merkle_s)):
item = merkle_s[i]
h = Hash( hash_decode(item) + h ) if ((pos >> i) & 1) else Hash( h + hash_decode(item) )
return hash_encode(h)
def path(self):
wdir = self.config.get('blockchain_headers_path', user_dir())
if wdir and not os.path.exists( wdir ): os.mkdir(wdir)
return os.path.join( wdir, 'blockchain_headers')
def init_headers_file(self):
filename = self.path()
if os.path.exists(filename):
return
try:
import urllib, socket
socket.setdefaulttimeout(30)
print_error("downloading ", self.headers_url )
urllib.urlretrieve(self.headers_url, filename)
except:
print_error( "download failed. creating file", filename )
open(filename,'wb+').close()
def save_chunk(self, index, chunk):
filename = self.path()
f = open(filename,'rb+')
f.seek(index*2016*80)
h = f.write(chunk)
f.close()
self.set_local_height()
def save_header(self, header):
data = self.header_to_string(header).decode('hex')
assert len(data) == 80
height = header.get('block_height')
filename = self.path()
f = open(filename,'rb+')
f.seek(height*80)
h = f.write(data)
f.close()
self.set_local_height()
def set_local_height(self):
name = self.path()
if os.path.exists(name):
h = os.path.getsize(name)/80 - 1
if self.local_height != h:
self
<FILEB>
<CHANGES>
self.scores, _ = self.inference(
<CHANGEE>
<FILEE>
<FILEB>
d = tf.reduce_sum(
tf.reshape(a, [-1, FLAGS.max_sentence_len, 1, 1]) * hidden,
[1, 2])
ds = tf.reshape(d, [-1, self.numHidden * 2])
scores = tf.nn.xw_plus_b(ds, self.clfier_softmax_W,
self.clfier_softmax_b)
return scores, length
else:
raise ValueError('model must either be clfier or ner')
def ner_loss(self, ner_cX, ner_Y):
P, sequence_length = self.inference(ner_cX, model='ner')
log_likelihood, self.transition_params = tf.contrib.crf.crf_log_likelihood(
P, ner_Y, sequence_length)
loss = tf.reduce_mean(-log_likelihood)
regularization_loss = tf.add_n(
tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
return loss + regularization_loss * FLAGS.l2_reg_lambda
def clfier_loss(self, clfier_cX, clfier_Y, entity_info):
<CHANGES>
self.scores = self.inference(
<CHANGEE>
clfier_cX, model='clfier', entity_info=entity_info, rnn_reuse=True)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.scores, labels=clfier_Y)
loss = tf.reduce_mean(cross_entropy, name='cross_entropy')
regularization_loss = tf.add_n(
tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
normed_embedding = tf.nn.l2_normalize(self.common_id_embedding, dim=1)
similarity_matrix = tf.matmul(normed_embedding,
tf.transpose(normed_embedding, [1, 0]))
fro_norm = tf.reduce_sum(tf.nn.l2_loss(similarity_matrix))
final_loss = loss + regularization_loss * FLAGS.l2_reg_lambda + fro_norm * FLAGS
<FILEE>
<SCANS> data path:", os.path.realpath(trainDataPath))
ner_cX, ner_Y, clfier_cX, clfier_Y, entity_info = inputs(trainDataPath)
ner_tcX, ner_tY, clfier_tcX, clfier_tY, _ = do_load_data_joint_attend(
FLAGS.test_data_path, FLAGS.max_sentence_len)
ner_total_loss = model.ner_loss(ner_cX, ner_Y)
ner_var_list = [
v for v in tf.global_variables()
if 'Attention' not in v.name and 'Clfier_output' not in v.name
]
print('ner var list:')
print([v.name for v in ner_var_list])
ner_train_op = train(ner_total_loss, var_list=ner_var_list)
ner_test_unary_score, ner_test_sequence_length = model.test_unary_score(
)
clfier_total_loss = model.clfier_loss(clfier_cX, clfier_Y, entity_info)
clfier_var_list = [
v for v in tf.global_variables()
if 'Ner_output' not in v.name and 'transitions' not in v.name
]
print('clfier var list:')
print([v.name for v in clfier_var_list])
clfier_train_op = train(clfier_total_loss, var_list=clfier_var_list)
test_clfier_score = model.test_clfier_score()
ner_seperate_list = [
v for v in tf.global_variables()
if 'Ner_output' in v.name or 'transition' in v.name
]
ner_seperate_op = train(ner_total_loss, var_list=ner_seperate_list)
clfier_seperate_list = [
v for v in tf.global_variables()
if 'Attention' in v.name or 'Clfier_output' in v.name
]
clfier_seperate_op = train(
<FILEB>
<CHANGES>
abstract.FileDescriptor.loseConnection(self, reason)
<CHANGEE>
<FILEE>
<FILEB>
## reset any references to the old doRead
del self.doRead
self.stopReading()
self.startReading()
try:
self._verifyPeer()
except Exception, e:
self.loseConnection(e)
return
## TLS handshake (including certificate verification) finished succesfully
del self.protocol.makeConnection
self.protocol.makeConnection(self)
def startTLS(self):
self.doRead = self.doHandshake
self.startReading()
def loseConnection(self, reason=failure.Failure(main.CONNECTION_DONE)):
reason = failure.Failure(reason) # accept python exceptions too
self._close_reason = reason.value
<CHANGES>
tcp.Server.loseConnection(self, reason)
<CHANGEE>
def connectionLost(self, reason):
if self.__watchdog is not None:
self.__watchdog.cancel()
self.__watchdog = None
tcp.Server.connectionLost(self, reason)
class TLSPort(tcp.Port):
"""Add TLS capabilities to a TCP port"""
transport = TLSServer
def __init__(self, port, factory, credentials, backlog=50, interface='', reactor=None, session_class=ServerSession):
tcp.Port.__init__(self, port, factory, backlog, interface, reactor)
self.credentials = credentials
self.session_class = session_class
<FILEE>
<SCANS># Copyright (C) 2007-2008 AG Projects. See LICENSE for details.
#
"""GNUTLS Twisted interface"""
__all__ = ['X509Credentials', 'connectTLS', 'listenTLS']
from time import time
from twisted.python import failure
from twisted.internet import main, base, interfaces, abstract, tcp, error
from zope.interface import implementsOnly, implementedBy
from gnutls.connection import ClientSession, ServerSession, ServerSessionFactory
from gnutls.connection import X509Credentials as _X509Credentials
from gnutls.constants import SHUT_RDWR, SHUT_WR
from gnutls.errors import *
class KeepRunning:
"""Return this class from a recurrent function to indicate that it should keep running"""
pass
class RecurrentCall(object):
"""Execute a function repeatedly at the given interval, until signaled to stop"""
def __init__(self, period, func, *args, **kwargs):
from twisted.internet import reactor
self.func = func
self.args = args
self.kwargs = kwargs
self.period = period
self.now = None
self.next = None
self.callid = reactor.callLater(period, self)
def __call__(self):
from twisted.internet import reactor
self.callid = None
if self.now is None:
self.now = time()
self.next = self.now + self.period
else:
self.now, self.next = self.next, self.next + self.period
result = self.func(*self.args, **self.kwargs)
if result is KeepRunning:
delay = max(self.next-time(), 0)
self.callid = reactor.callLater(delay, self)
def cancel(self):
if self.callid is not None:
try:
self.callid.cancel()
except ValueError:
pass
self.callid = None
class CertificateOK: pass
class X509Credentials(_X509Credentials):
"""A Twisted enhanced X509Credentials"""
verify_peer = False
verify_period = None
def verify_callback(self, peer_cert, preverify_status=None):
"""Verifies the peer certificate and raises an exception if it cannot be accepted"""
if isinstance(preverify_status, Exception):
raise preverify_status
self.check_certificate(peer_cert, cert_name='peer certificate')
class TLSMixin:
"""TLS specific functionality common to both clients and servers"""
def getPeerCertificate(self):
return self.socket.peer_certificate
def doRead(self):
try:
return tcp.Connection.doRead(self)
except (OperationWouldBlock, OperationInterrupted):
return
except GNUTLSError, e:
return e
def writeSomeData(self, data):
try:
return tcp.Connection.writeSomeData(self, data)
except OperationInterrupted:
return self.writeSomeData(data)
except OperationWouldBlock:
return 0
except GNUTLSError, e:
return e
def
<FILEB>
<CHANGES>
d = {k: _default_to_regular(v) for k, v in d.items()}
<CHANGEE>
<FILEE>
<FILEB>
""">>> trie = Trie(["abc", "def"])"""
""">>> trie.as_dict()"""
"""{'a': {'b': {'c': {True: None}}}, 'd': {'e': {'f': {True: None}}}}"""
def _default_to_regular(d):
"""Source: http://stackoverflow.com/a/26496899/4760801"""
""":param d: Nested ``defaultdict`` to convert to regular ``dict``"""
""":type d: defaultdict(str -> defaultdict(...))"""
""":return: A dict representation of the defaultdict"""
""":rtype: dict(str -> dict(str -> ...))"""
""":Example:"""
""">>> from collections import defaultdict"""
""">>> d = defaultdict(defaultdict)"""
""">>> d["one"]["two"] = "three""""
""">>> d"""
"""defaultdict(<type 'collections.defaultdict'>, {'one': defaultdict(None, {'two': 'three'})})"""
""">>> _default_to_regular(d)"""
"""{'one': {'two': 'three'}}"""
if isinstance(d, defaultdict):
<CHANGES>
d = {k: _default_to_regular(v) for k, v in d.iteritems()}
<CHANGEE>
return d
return _default_to_regular(self)
<FILEE>
<SCANS># Natural Language Toolkit: Utility functions
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Steven Bird <stevenbird1@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function
import locale
import re
import types
import textwrap
import pydoc
import bisect
import os
from itertools import islice, chain, combinations
from pprint import pprint
from collections import defaultdict, deque
from sys import version_info
from nltk.internals import slice_bounds, raise_unorderable_types
from nltk.compat import (class_types, text_type, string_types, total_ordering,
python_2_unicode_compatible, getproxies,
ProxyHandler, build_opener, install_opener,
HTTPPasswordMgrWithDefaultRealm,
ProxyBasicAuthHandler, ProxyDigestAuthHandler)
######################################################################
# Short usage message
######################################################################
def usage(obj, selfname='self'):
import inspect
str(obj) # In case it's lazy, this will load it.
if not isinstance(obj, class_types):
obj = obj.__class__
print('%s supports the following operations:' % obj.__name__)
for (name, method) in sorted(pydoc.allmethods(obj).items()):
if name.startswith('_'): continue
if getattr(method, '__deprecated__', False): continue
args, varargs, varkw, defaults = inspect.getargspec(method)
if (args and args[0]=='self' and
(defaults is None or len(args)>len(defaults))):
args = args[1:]
name = '%s.%s' % (selfname, name)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults)
print(textwrap.fill('%s%s' % (name, argspec),
initial_indent=' - ',
subsequent_indent=' '*(len(name)+5)))
##########################################################################
# IDLE
##########################################################################
def in_idle():
"""Return True if this function is run within idle. Tkinter"""
"""programs that are run in idle should never call ``Tk.mainloop``; so"""
"""this function should be used to gate all calls to ``Tk.mainloop``."""
""":warning: This function works by checking ``sys.stdin``. If the"""
"""user has modified ``sys.stdin``, then it may return incorrect"""
"""results."""
""":rtype: bool"""
import sys
return sys.stdin.__class__.__name__ in ('PyShell', 'RPCProxy')
##########################################################################
# PRETTY PRINTING
##########################################################################
def pr(data, start=0, end=None):
"""Pretty print a sequence of data items"""
""":param data: the data stream to print"""
""":type data: sequence or iter"""
""":param start: the start position"""
""":type start: int"""
""":param end: the end position"""
""":type end: int"""
pprint(list(islice(data, start, end)))
<FILEB>
<CHANGES>
if Backend.getIdentifiedDbms() in (DBMS.MYSQL, DBMS.HSQL):
<CHANGEE>
<FILEE>
<FILEB>
"""PostgreSQL input: SELECT usename, passwd FROM pg_shadow"""
"""PostgreSQL output: 'HsYIBS'||COALESCE(CAST(usename AS CHARACTER(10000)), ' ')||'KTBfZp'||COALESCE(CAST(passwd AS CHARACTER(10000)), ' ')||'LkhmuP' FROM pg_shadow"""
"""Oracle input: SELECT COLUMN_NAME, DATA_TYPE FROM SYS.ALL_TAB_COLUMNS WHERE TABLE_NAME='USERS'"""
"""Oracle output: 'GdBRAo'||NVL(CAST(COLUMN_NAME AS VARCHAR(4000)), ' ')||'czEHOf'||NVL(CAST(DATA_TYPE AS VARCHAR(4000)), ' ')||'JVlYgS' FROM SYS.ALL_TAB_COLUMNS WHERE TABLE_NAME='USERS'"""
"""Microsoft SQL Server input: SELECT name, master.dbo.fn_varbintohexstr(password) FROM master..sysxlogins"""
"""Microsoft SQL Server output: 'QQMQJO'+ISNULL(CAST(name AS VARCHAR(8000)), ' ')+'kAtlqH'+ISNULL(CAST(master.dbo.fn_varbintohexstr(password) AS VARCHAR(8000)), ' ')+'lpEqoi' FROM master..sysxlogins"""
"""@param query: query string to be processed"""
"""@type query: C{str}"""
"""@return: query string nulled, casted and concatenated"""
"""@rtype: C{str}"""
if unpack:
concatenatedQuery = ""
query = query.replace(", ", ',')
fieldsSelectFrom, fieldsSelect, fieldsNoSelect, fieldsSelectTop, fieldsSelectCase, _, fieldsToCastStr, fieldsExists = self.getFields(query)
castedFields = self.nullCastConcatFields(fieldsToCastStr)
concatenatedQuery = query.replace(fieldsToCastStr, castedFields, 1)
else:
return query
<CHANGES>
if Backend.isDbms(DBMS.MYSQL):
<CHANGEE>
if fieldsExists:
concatenatedQuery = concatenatedQuery.replace("SELECT ", "CONCAT('%s'," % kb.chars.start, 1)
concatenatedQuery += ",'%s')" % kb<SCANS>
topNum = re.search("TOP\s+([\d]+)\s+", limitedQuery, re.I).group(1)
limitedQuery = limitedQuery.replace("TOP %s " % topNum, "")
if forgeNotIn:
limitedQuery = limitedQuery.replace("SELECT ", (limitStr % 1), 1)
if " ORDER BY " not in fromFrom:
# Reference: http://vorg.ca/626-the-MS-SQL-equivalent-to-MySQLs-limit-command
if " WHERE " in limitedQuery:
limitedQuery = "%s AND %s " % (limitedQuery, self.nullAndCastField(uniqueField or field))
else:
limitedQuery = "%s WHERE %s " % (limitedQuery, self.nullAndCastField(uniqueField or field))
limitedQuery += "NOT IN (%s" % (limitStr % num)
limitedQuery += "%s %s ORDER BY %s) ORDER BY %s" % (self.nullAndCastField(uniqueField or field), fromFrom, uniqueField or "1", uniqueField or "1")
else:
match = re.search(" ORDER BY (\w+)\Z", query)
field = match.group(1) if match else field
if " WHERE " in limitedQuery:
limitedQuery = "%s AND %s " % (limitedQuery, field)
else:
limitedQuery = "%s WHERE %s " % (limitedQuery, field)
limitedQuery += "NOT IN (%s" % (limitStr % num)
limitedQuery += "%s %s)" % (field, fromFrom)
if orderBy:
limitedQuery += orderBy
return limitedQuery
def forgeQueryOutputLength(self, expression):
lengthQuery = queries[Backend.getIdentifiedDbms()].length.query
select = re.search("\ASELECT\s+", expression, re.I)
selectTopExpr = re.search("\ASELECT\s+TOP\s+[\d]+\s+(.+?)\s+FROM", expression, re.I)
_, _, _, _, _, _, fieldsStr, _ = self.getFields(expression)
if selectTopExpr:
lengthExpr = lengthQuery % ("(%s)" % expression)
elif select:
lengthExpr = expression.replace(fieldsStr
<FILEB>
<CHANGES>
return [get_base_url() + data_location + filename +'s' for filename in result.get('filename')[index_number_start:index_number_end]]
<CHANGEE>
<FILEE>
<FILEB>
"""Returns"""
"""-------"""
"""out : list"""
"""Returns the filenames of the observation summary file"""
"""Examples"""
"""--------"""
""">>> import sunpy.instr.rhessi as rhessi"""
""">>> rhessi.get_obssum_filename(('2011/04/04', '2011/04/05')) # doctest: +SKIP"""
""".. note::"""
"""This API is currently limited to providing data from whole days only."""
# need to download and inspect the dbase file to determine the filename
# for the observing summary data
f = get_obssumm_dbase_file(time_range)
data_location = 'metadata/catalog/'
result = parse_obssumm_dbase_file(f[0])
_time_range = TimeRange(time_range)
index_number_start = _time_range.start.day - 1
index_number_end = _time_range.end.day - 1
<CHANGES>
return [data_servers[0] + data_location + filename + 's' for filename in result.get('filename')[index_number_start:index_number_end]]
<CHANGEE>
def get_obssumm_file(time_range):
"""Download the RHESSI observing summary data from one of the RHESSI"""
"""servers."""
"""Parameters"""
"""----------"""
"""time_range : `str`, `sunpy.time.TimeRange`"""
"""A TimeRange or time range compatible string"""
"""Returns"""
"""-------"""
"""out : tuple"""
"""Return a tuple (filename, headers) where filename is the local file"""
"""name under which the object can be found, and headers is"""
<FILEE>
<SCANS> observing summary database file. This file lists the"""
"""name of observing summary files for specific time ranges."""
"""Parameters"""
"""----------"""
"""time_range : `str`, `sunpy.time.TimeRange`"""
"""A `~sunpy.time.TimeRange` or `~sunpy.time.TimeRange` compatible string."""
"""Returns"""
"""-------"""
"""value : `tuple`"""
"""Return a `tuple` (filename, headers) where filename is the local file"""
"""name under which the object can be found, and headers is"""
"""whatever the info() method of the object returned by urlopen."""
"""Examples"""
"""--------"""
""">>> import sunpy.instr.rhessi as rhessi"""
""">>> rhessi.get_obssumm_dbase_file(('2011/04/04', '2011/04/05')) # doctest: +SKIP"""
"""References"""
"""----------"""
"""| http://hesperia.gsfc.nasa.gov/ssw/hessi/doc/guides/hessi_data_access.htm#Observing Summary Data"""
""".. note::"""
"""This API is currently limited to providing data from whole days only."""
# http://hesperia.gsfc.nasa.gov/hessidata/dbase/hsi_obssumm_filedb_200311.txt
_time_range = TimeRange(time_range)
data_location = 'dbase/'
if _time_range.start < parse_time("2002/02/01"):
raise ValueError("RHESSI summary files are not available for before 2002-02-01")
url_root = get_base_url() + data_location
url = url_root + _time_range.start.strftime("hsi_obssumm_filedb_%Y%m.txt")
f = urllib.request.urlretrieve(url)
return f
def parse_obssumm_dbase_file(filename):
"""Parse the RHESSI observing summary database file. This file lists the"""
"""name of observing summary files for specific time ranges along with other"""
"""info"""
"""Parameters"""
"""----------"""
"""filename : `str`"""
"""The filename of the obssumm dbase file."""
"""Returns"""
"""-------"""
"""out : `dict`"""
"""Return a `dict` containing the parsed data in the dbase file."""
"""Examples"""
"""--------"""
""">>> import sunpy.instr.rhessi as rhessi"""
<FILEB>
<CHANGES>
LOG.debug('Started child %d', pid)
<CHANGEE>
<FILEE>
<FILEB>
# die instantly too quickly.
if time.time() - wrap.forktimes[0] < wrap.workers:
LOG.info(_LI('Forking too fast, sleeping'))
time.sleep(1)
wrap.forktimes.pop(0)
wrap.forktimes.append(time.time())
pid = os.fork()
if pid == 0:
self.launcher = self._child_process(wrap.service)
while True:
self._child_process_handle_signal()
status, signo = self._child_wait_for_exit_or_signal(
self.launcher)
if not _is_sighup_and_daemon(signo):
self.launcher.wait()
break
self.launcher.restart()
os._exit(status)
<CHANGES>
LOG.info(_LI('Started child %d'), pid)
<CHANGEE>
wrap.children.add(pid)
self.children[pid] = wrap
return pid
def launch_service(self, service, workers=1):
"""Launch a service with a given number of workers."""
""":param service: a service to launch, must be an instance of"""
""":class:`oslo_service.service.ServiceBase`"""
""":param workers: a number of processes in which a service"""
"""will be running"""
_check_service_base(service)
wrap = ServiceWrapper(service, workers)
LOG.info(_LI('Starting %d workers'), wrap.workers)
<FILEE>
<SCANS>(_LW('pid %d not in child list'), pid)
return None
wrap = self.children.pop(pid)
wrap.children.remove(pid)
return wrap
def _respawn_children(self):
while self.running:
wrap = self._wait_child()
if not wrap:
# Yield to other threads if no children have exited
# Sleep for a short time to avoid excessive CPU usage
# (see bug #1095346)
eventlet.greenthread.sleep(self.wait_interval)
continue
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
def wait(self):
"""Loop waiting on children to die and respawning as necessary."""
systemd.notify_once()
if self.conf.log_options:
LOG.debug('Full set of CONF:')
self.conf.log_opt_values(LOG, logging.DEBUG)
try:
while True:
self.handle_signal()
self._respawn_children()
# No signal means that stop was called. Don't clean up here.
if not self.sigcaught:
return
signame = self.signal_handler.signals_to_name[self.sigcaught]
LOG.info(_LI('Caught %s, stopping children'), signame)
if not _is_sighup_and_daemon(self.sigcaught):
break
self.conf.reload_config_files()
for service in set(
[wrap.service for wrap in self.children.values()]):
service.reset()
for pid in self.children:
os.kill(pid, signal.SIGTERM)
self.running = True
self.sigcaught = None
except eventlet.greenlet.GreenletExit:
LOG.info(_LI("Wait called after thread killed. Cleaning up."))
# if we are here it means that we try to do gracefull shutdown.
# add alarm watching that graceful_shutdown_timeout is not exceeded
if (self.conf.graceful_shutdown_timeout and
self.signal_handler.is_signal_supported('SIGALRM')):
signal.alarm(self.conf.graceful_shutdown_timeout)
self.stop()
def stop(self):
"""Terminate child processes and wait on each."""
self.running = False
LOG.debug("Stop services.")
for service in set(
[wrap.service for wrap in self.children.values()]):
service.stop()
LOG.debug("Killing children.")
for pid in self.children:
try:
os.kill(pid, signal.SIGTERM)
except OSError as exc:
if exc.errno != errno.ESRCH:
raise
# Wait for children to die
if self.children:
LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
while self.children:
self._wait_child()
class Service(ServiceBase):
"""Service object for binaries running on hosts."""
<FILEB>
<CHANGES>
raise RuntimeError("Failed to access %s at %s" % (filename, url))
<CHANGEE>
<FILEE>
<FILEB>
# Assume the default URL for DTDs if the top parent
# does not contain an absolute path
source = "http://www.ncbi.nlm.nih.gov/dtd/"
else:
source = os.path.dirname(url)
# urls always have a forward slash, don't use os.path.join
url = source.rstrip("/") + "/" + systemId
self.dtd_urls.append(url)
# First, try to load the local version of the DTD file
location, filename = os.path.split(systemId)
handle = self.open_dtd_file(filename)
if not handle:
# DTD is not available as a local file. Try accessing it through
# the internet instead.
from Bio._py3k import StringIO
try:
handle = _urlopen(url)
except IOError:
<CHANGES>
raise RuntimeException("Failed to access %s at %s" % (filename, url))
<CHANGEE>
text = handle.read()
handle.close()
self.save_dtd_file(filename, text)
handle = StringIO(text)
parser = self.parser.ExternalEntityParserCreate(context)
parser.ElementDeclHandler = self.elementDecl
parser.ParseFile(handle)
handle.close()
self.dtd_urls.pop()
return 1
<FILEE>
<SCANS>
if self.parser.StartElementHandler:
# We saw the initial <!xml declaration, and expat
# didn't notice any errors, so self.object should be
# defined. If not, this is a bug.
raise RuntimeError("Failed to parse the XML file correctly, possibly due to a bug in Bio.Entrez. Please contact the Biopython developers at biopython-dev@biopython.org for assistance.")
else:
# We did not see the initial <!xml declaration, so
# probably the input data is not in XML format.
raise NotXMLError("XML declaration not found")
self.parser.Parse("", True)
self.parser = None
return
try:
self.parser.Parse(text, False)
except expat.ExpatError as e:
if self.parser.StartElementHandler:
# We saw the initial <!xml declaration, so we can be sure
# that we are parsing XML data. Most likely, the XML file
# is corrupted.
raise CorruptedXMLError(e)
else:
# We have not seen the initial <!xml declaration, so
# probably the input data is not in XML format.
raise NotXMLError(e)
if not self.stack:
# Haven't read enough from the XML file yet
continue
records = self.stack[0]
if not isinstance(records, list):
raise ValueError("The XML file does not represent a list. Please use Entrez.read instead of Entrez.parse")
while len(records) > 1: # Then the top record is finished
record = records.pop(0)
yield record
def xmlDeclHandler(self, version, encoding, standalone):
# XML declaration found; set the handlers
self.parser.StartElementHandler = self.startElementHandler
self.parser.EndElementHandler = self.endElementHandler
self.parser.CharacterDataHandler = self.characterDataHandler
self.parser.ExternalEntityRefHandler = self.externalEntityRefHandler
self.parser.StartNamespaceDeclHandler = self.startNamespaceDeclHandler
def startNamespaceDeclHandler(self, prefix, un):
raise NotImplementedError("The Bio.Entrez parser cannot handle XML data that make use of XML namespaces")
def startElementHandler(self, name, attrs):
self.content = ""
if name in self.lists:
object = ListElement()
elif name in self.dictionaries:
object = DictionaryElement()
elif name in self.structures:
object = StructureElement(self.structures[name])
elif name in self.items: # Only appears in ESummary
name = str(attrs["Name"]) # convert from Unicode
del attrs["Name"]
itemtype = str(attrs["Type"]) # convert from Unicode
del attrs["Type"]
if itemtype=="Structure":
object = DictionaryElement()
elif name in ("ArticleIds", "History"):
object = StructureElement(["pubmed", "medline"])
elif itemtype=="List":
object = ListElement()
else:
object = StringElement()
object.itemname = name
object.itemtype = itemtype
elif name in self.strings + self.errors + self.integers:
self.attributes = attrs
return
else:
# Element not found in DTD
if self.
<FILEB>
<CHANGES>
self.owner = discord.utils.find(lambda m: m.id == self.config.owner_id and m.voice_channel, self.get_all_members())
<CHANGEE>
<FILEE>
<FILEB>
except discord.Forbidden:
print("Error: Cannot delete message \"%s\", no permission" % message.clean_content)
except discord.NotFound:
print("Warning: Cannot delete message \"%s\", message not found" % message.clean_content)
async def safe_edit_message(self, message, new, *, send_if_fail=False):
try:
return await self.edit_message(message, new)
except discord.NotFound:
print("Warning: Cannot edit message \"%s\", message not found" % message.clean_content)
if send_if_fail:
print("Sending instead")
return await self.safe_send_message(message.channel, new)
# noinspection PyMethodOverriding
def run(self):
return super().run(self.config.username, self.config.password)
async def on_ready(self):
print('Connected!\n')
print("Bot: %s/%s" % (self.user.id, self.user.name))
<CHANGES>
self.owner = discord.utils.get(self.get_all_members(), id=self.config.owner_id)
<CHANGEE>
if not self.owner:
print("Owner could not be found on any server (id: %s)" % self.config.owner_id)
else:
print("Owner: %s/%s" % (self.owner.id, self.owner.name))
if self.config.owner_id == self.user.id:
print("\n"
"[NOTICE] You have either set the OwnerID config option to the bot's id instead "
"of yours, or you've used your own credentials to log the bot in instead of the "
"bot's account (the bot needs its own account to work properly).")
print()
print("Bound to channels: %s" % self.config.bound_channels) # TODO: Print list of channels
# TODO: Make this prettier and easier to read (in the console)
<FILEE>
<SCANS>format(prefix, entry.title)[:128]
game = discord.Game(name=name)
await self.change_status(game)
# TODO: Change these to check then send
async def safe_send_message(self, dest, content, *, tts=False):
try:
return await self.send_message(dest, content, tts=tts)
except discord.Forbidden:
print("Error: Cannot send message to %s, no permission" % dest.name)
except discord.NotFound:
print("Warning: Cannot send message to %s, invalid channel?" % dest.name)
async def safe_delete_message(self, message):
try:
return await self.delete_message(message)
print("Command prefix is %s" % self.config.command_prefix)
print("Whitelist check is %s" % ['disabled', 'enabled'][self.config.white_list_check])
print("Skip threshold at %s votes or %s%%" % (self.config.skips_required, self._fixg(self.config.skip_ratio_required*100)))
print("Now Playing message @mentions are %s" % ['disabled', 'enabled'][self.config.now_playing_mentions])
print("Autosummon is %s" % ['disabled', 'enabled'][self.config.auto_summon])
print("Auto-playlist is %s" % ['disabled', 'enabled'][self.config.auto_playlist])
print("Downloaded songs will be %s after playback" % ['deleted', 'saved'][self.config.save_videos])
print()
if self.servers:
print('--Server List--')
[print(s) for s in self.servers]
else:
print("No servers have been joined yet.")
print()
# maybe option to leave the ownerid blank and generate a random command for the owner to use
# wait_for_message is pretty neato
if self.config.auto_summon:
as_ok = await self._auto_summon()
if self.config.auto_playlist and as_ok:
await self.on_finished_playing(await self.get_player(self.owner.voice_channel))
async def handle_help(self):
"""Usage: {command_prefix}help"""
"""Prints a help message"""
helpmsg = "**Commands**\n```"
commands = []
# TODO: Get this to format nicely
for att in dir(self):
if att.startswith('handle_') and att
<FILEB>
<CHANGES>
kb.targetUrls.add((url, method, urldecode(data), cookie))
<CHANGEE>
<FILEE>
<FILEB>
scheme = "https"
# Avoid to add a static content length header to
# conf.httpHeaders and consider the following lines as
# POSTed data
if key == "Content-Length":
data = ""
params = True
# Avoid proxy and connection type related headers
elif key not in ( "Proxy-Connection", "Connection" ):
conf.httpHeaders.append((str(key), str(value)))
if conf.scope:
getPostReq &= re.search(conf.scope, host) is not None
if getPostReq and (params or cookie):
if not url.startswith("http"):
url = "%s://%s:%s%s" % (scheme or "http", host, port or "80", url)
scheme = None
port = None
if not kb.targetUrls or url not in addedTargetUrls:
<CHANGES>
kb.targetUrls.add((url, method, data, cookie))
<CHANGEE>
addedTargetUrls.add(url)
fp = openFile(reqFile, "rb")
content = fp.read()
content = content.replace("\r", "")
if conf.scope:
logger.info("using regular expression '%s' for filtering targets" % conf.scope)
__parseBurpLog(content)
__parseWebScarabLog(content)
def __loadQueries():
"""Loads queries from 'xml/queries.xml' file."""
for node in xmlobject.XMLFile(path=paths.QUERIES_XML, textfilter=sanitizeStr).root.dbms:
queries[node.value] = node
<FILEE>
<SCANS>#!/usr/bin/env python
"""$Id$"""
"""Copyright (c) 2006-2010 sqlmap developers (http://sqlmap.sourceforge.net/)"""
"""See the file 'doc/COPYING' for copying permission"""
import codecs
import cookielib
import difflib
import inspect
import logging
import os
import re
import socket
import sys
import threading
import urllib2
import urlparse
from extra.clientform.clientform import ParseResponse
from extra.clientform.clientform import ParseError
from extra.keepalive import keepalive
from extra.xmlobject import xmlobject
from lib.controller.checks import checkConnection
from lib.core.common import Backend
from lib.core.common import extractRegexResult
from lib.core.common import getConsoleWidth
from lib.core.common import getFileItems
from lib.core.common import getFileType
from lib.core.common import normalizePath
from lib.core.common import ntToPosixSlashes
from lib.core.common import openFile
from lib.core.common import parseTargetDirect
from lib.core.common import parseTargetUrl
from lib.core.common import paths
from lib.core.common import randomRange
from lib.core.common import randomStr
from lib.core.common import readCachedFileContent
from lib.core.common import readInput
from lib.core.common import runningAsAdmin
from lib.core.common import sanitizeStr
from lib.core.common import UnicodeRawConfigParser
from lib.core.convert import urldecode
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import paths
from lib.core.data import queries
from lib.core.datatype import advancedDict
from lib.core.datatype import injectionDict
from lib.core.enums import DBMS
from lib.core.enums import HTTPHEADER
from lib.core.enums import HTTPMETHOD
from lib.core.enums import PAYLOAD
from lib.core.enums import PRIORITY
from lib.core.exception import sqlmapFilePathException
from lib.core.exception import sqlmapGenericException
from lib.core.exception import sqlmapMissingDependence
from lib.core.exception import sqlmapMissingMandatoryOptionException
from lib.core.exception import sqlmapMissingPrivileges
from lib.core.exception import sqlmapSilentQuitException
from lib.core.exception import sqlmapSyntaxException
from lib.core.exception import sqlmapUnsupportedDBMSException
from lib.core.exception import sqlmapUserQuitException
from lib.core.optiondict import optDict
from lib.core.settings import DEFAULT_PAGE_ENCODING
from lib.core.settings import IS_WIN
from lib.core.settings import PLATFORM
from lib.core.settings import PYVERSION
from lib.core.settings import SITE
from lib.core.settings import DEFAULT_TOR_
<FILEB>
<CHANGES>
X509Extension(b('basicConstraints'), True, b('CA:false'))])
<CHANGEE>
<FILEE>
<FILEB>
request = X509Req()
subject = request.get_subject()
self.assertTrue(
isinstance(subject, X509NameType),
"%r is of type %r, should be %r" % (subject, type(subject), X509NameType))
subject.commonName = "foo"
self.assertEqual(request.get_subject().commonName, "foo")
del request
subject.commonName = "bar"
self.assertEqual(subject.commonName, "bar")
def test_get_subject_wrong_args(self):
request = X509Req()
self.assertRaises(TypeError, request.get_subject, None)
def test_add_extensions(self):
"""L{X509Req.add_extensions} accepts a C{list} of L{X509Extension}"""
"""instances and adds them to the X509 request."""
request = X509Req()
request.add_extensions([
<CHANGES>
X509Extension('basicConstraints', True, 'CA:false')])
<CHANGEE>
# XXX Add get_extensions so the rest of this unit test can be written.
def test_add_extensions_wrong_args(self):
"""L{X509Req.add_extensions} raises L{TypeError} if called with the wrong"""
"""number of arguments or with a non-C{list}. Or it raises L{ValueError}"""
"""if called with a C{list} containing objects other than L{X509Extension}"""
"""instances."""
request = X509Req()
self.assertRaises(TypeError, request.add_extensions)
self.assertRaises(TypeError, request.add_extensions, object())
self.assertRaises(ValueError, request.add_extensions, [object()])
self.assertRaises(TypeError, request.add_extensions, [], None)
class X509Tests(TestCase, _PKeyInteractionTestsMixin):
<FILEE>
<SCANS>rBA+sJEBbqx5RdXbIRGicPG/8qQ4Zm1SKOgotcbwiaor2yxZ2wIDAQAB"""
"""AoGBAPCgMpmLxzwDaUmcFbTJUvlLW1hoxNNYSu2jIZm1k/hRAcE60JYwvBkgz3UB"""
"""yMEh0AtLxYe0bFk6EHah11tMUPgscbCq73snJ++8koUw+csk22G65hOs51bVb7Aa"""
"""6JBe67oLzdtvgCUFAA2qfrKzWRZzAdhUirQUZgySZk+Xq1pBAkEA/kZG0A6roTSM"""
"""BVnx7LnPfsycKUsTumorpXiylZJjTi9XtmzxhrYN6wgZlDOOwOLgSQhszGpxVoMD"""
"""u3gByT1b2QJBAPtL3mSKdvwRu/+40zaZLwvSJRxaj0mcE4BJOS6Oqs/hS1xRlrNk"""
"""PpQ7WJ4yM6ZOLnXzm2mKyxm50Mv64109FtMCQQDOqS2KkjHaLowTGVxwC0DijMfr"""
"""I9Lf8sSQk32J5VWCySWf5gGTfEnpmUa41gKTMJIbqZZLucNuDcOtzUaeWZlZAkA8"""
"""ttXigLnCqR486JDPTi9ZscoZkZ+w7y6e/hH8t6d5Vjt48JVyfjPIaJY+km58LcN3"""
"""6AWSeGAdtRFHVzR7oHjVAkB4hutvxiOeiIVQNBhM6RSI9aBPMI21DoX2JRoxvNW2"""
"""cbvAhow217X9V0dVerEOKxnNYspXRrh36h7k4mQA+sDq"""
"""-----END RSA PRIVATE KEY-----""")
server_cert_pem = b("""-----BEGIN CERTIFICATE-----"""
"""MIICKDCCAZGgAwIBAgIJAJn/HpR21r/8MA0GCSqGSIb3DQEBBQUAMFgxCzAJBgNV"""
"""BAYTAlVTMQswCQYDVQQIEwJJTDE
<FILEB>
<CHANGES>
clone._select = [SQL('1')]
<CHANGEE>
<FILEE>
<FILEB>
return query
def aggregate(self, aggregation=None, convert=True):
return self._aggregate(aggregation).scalar(convert=convert)
def count(self):
if self._distinct or self._group_by:
return self.wrapped_count()
# defaults to a count() of the primary key
return self.aggregate(convert=False) or 0
def wrapped_count(self, clear_limit=True):
clone = self.order_by()
if clear_limit:
clone._limit = clone._offset = None
sql, params = clone.sql()
wrapped = 'SELECT COUNT(1) FROM (%s) AS wrapped_select' % sql
rq = self.model_class.raw(wrapped, *params)
return rq.scalar() or 0
def exists(self):
clone = self.paginate(1, 1)
<CHANGES>
clone._select = [self.model_class._meta.primary_key]
<CHANGEE>
return bool(clone.scalar())
def get(self):
clone = self.paginate(1, 1)
try:
return clone.execute().next()
except StopIteration:
raise self.model_class.DoesNotExist(
'Instance matching query does not exist:\nSQL: %s\nPARAMS: %s'
% self.sql())
def first(self):
res = self.execute()
res.fill_cache(1)
<FILEE>
<SCANS>', ()))
def transaction(self):
return transaction(self)
def commit_on_success(self, func):
@wraps(func)
def inner(*args, **kwargs):
with self.transaction():
return func(*args, **kwargs)
return inner
def savepoint(self, sid=None):
if not self.savepoints:
raise NotImplementedError
return savepoint(self, sid)
def get_tables(self):
raise NotImplementedError
def get_indexes_for_table(self, table):
raise NotImplementedError
def sequence_exists(self, seq):
raise NotImplementedError
def create_table(self, model_class, safe=False):
qc = self.compiler()
return self.execute_sql(*qc.create_table(model_class, safe))
def create_index(self, model_class, fields, unique=False):
qc = self.compiler()
if not isinstance(fields, (list, tuple)):
raise ValueError('Fields passed to "create_index" must be a list '
'or tuple: "%s"' % fields)
fobjs = [
model_class._meta.fields[f] if isinstance(f, basestring) else f
for f in fields]
return self.execute_sql(*qc.create_index(model_class, fobjs, unique))
def create_foreign_key(self, model_class, field, constraint=None):
qc = self.compiler()
return self.execute_sql(*qc.create_foreign_key(
model_class, field, constraint))
def create_sequence(self, seq):
if self.sequences:
qc = self.compiler()
return self.execute_sql(*qc.create_sequence(seq))
def drop_table(self, model_class, fail_silently=False, cascade=False):
qc = self.compiler()
return self.execute_sql(*qc.drop_table(
model_class, fail_silently, cascade))
def drop_sequence(self, seq):
if self.sequences:
qc = self.compiler()
return self.execute_sql(*qc.drop_sequence(seq))
def extract_date(self, date_part, date_field):
return fn.EXTRACT(Clause(date_part, R('FROM'), date_field))
def truncate_date(self, date_part, date_field):
return fn.DATE_TRUNC(SQL(date_part), date_field)
class SqliteDatabase(Database):
drop_cascade = False
foreign_keys = False
insert_many = sqlite3.sqlite_version_info >= (3, 7, 11, 0)
limit_max = -1
op_overrides = {
OP_LIKE: 'GLOB',
OP_ILIKE: 'LIKE',
}
def _connect(self, database, **kwargs):
conn = sqlite3.connect(database, **kwargs)
self._add_conn_hooks(conn)
return conn
def _add_conn_hooks(self, conn):
conn.create_function('date_part',
<FILEB>
<CHANGES>
image_id = str(uuid.uuid4())
<CHANGEE>
<FILEE>
<FILEB>
self.assertEqual('XXX', ''.join(get_iter))
store.delete(location)
def test_delayed_delete_with_auth(self):
"""Ensure delete works with delayed delete and auth"""
"""Reproduces LP bug 1238604."""
swift_store_user = self.swift_config['swift_store_user']
tenant_name, username = swift_store_user.split(':')
tenant_id, auth_token, service_catalog = keystone_authenticate(
self.swift_config['swift_store_auth_address'],
self.swift_config['swift_store_auth_version'],
tenant_name,
username,
self.swift_config['swift_store_key'])
context = glance.context.RequestContext(
tenant=tenant_id,
service_catalog=service_catalog,
auth_tok=auth_token)
store = self.get_store(context=context)
<CHANGES>
image_id = uuidutils.generate_uuid()
<CHANGEE>
image_data = StringIO.StringIO('data')
uri, _, _, _ = store.add(image_id, image_data, 4)
location = glance.store.location.Location(
self.store_name,
store.get_store_location_class(),
uri=uri,
image_id=image_id)
container_name = location.store_location.container
container, _ = swift_get_container(self.swift_client, container_name)
(get_iter, get_size) = store.get(location)
self.assertEqual(4, get_size)
self.assertEqual('data', ''.join(get_iter))
<FILEE>
<SCANS>fail_end_of_download(self):
"""Get an object from Swift where Swift does not complete the request"""
"""in one attempt. Fails at the end of the download"""
self.config(
swift_store_retry_get_count=1,
)
store = self.get_store()
image_id = str(uuid.uuid4())
image_size = 1024 * 1024 * 5 # 5 MB
chars = string.ascii_uppercase + string.digits
image_data = ''.join(random.choice(chars) for x in range(image_size))
image_checksum = hashlib.md5(image_data)
uri, add_size, add_checksum, _ = store.add(image_id,
image_data,
image_size)
location = glance.store.location.Location(
self.store_name,
store.get_store_location_class(),
uri=uri,
image_id=image_id)
def iter_wrapper(iterable):
bytes_received = 0
for chunk in iterable:
yield chunk
bytes_received += len(chunk)
if bytes_received == image_size:
raise StopIteration
(get_iter, get_size) = store.get(location)
get_iter.wrapped = glance.store.swift.swift_retry_iter(
iter_wrapper(get_iter.wrapped), image_size,
store, location.store_location)
self.assertEqual(image_size, get_size)
received_data = ''.join(get_iter.wrapped)
self.assertEqual(image_data, received_data)
self.assertEqual(image_checksum.hexdigest(),
hashlib.md5(received_data).hexdigest())
def stash_image(self, image_id, image_data):
container_name = self.swift_config['swift_store_container']
swift_put_object(self.swift_client,
container_name,
image_id,
'XXX')
#NOTE(bcwaldon): This is a hack until we find a better way to
# build this URL
auth_url = self.swift_config['swift_store_auth_address']
auth_url = urlparse.urlparse(auth_url)
user = urllib.quote(self.swift_config['swift_store_user'])
key = self.swift_config['swift_store_key']
netloc = ''.join(('%s:%s' % (user, key), '@', auth_url.netloc))
path = os.path.join(auth_url.path, container_name, image_id)
# This is an auth url with /<CONTAINER>/<OBJECT> on the end
return 'swift+http://%s%s'
<FILEB>
<CHANGES>
result = file_output.file_outputter(self.text, filename, overwrite=True)
<CHANGEE>
<FILEE>
<FILEB>
"""results before saving them, just for the Hack Day in May 2013."""
def __init__(self, text):
"""Make a new VisTextDisplay"""
self.text_display = QtGui.QDialog()
self.setupUi(self.text_display)
self.text = text
self.show_text.setPlainText(text)
def trigger(self):
"""Cause the window to show up."""
self.btn_save_as.clicked.connect(self.save_as)
self.btn_close.clicked.connect(self.close)
self.text_display.exec_()
def save_as(self):
"""Save the file."""
filename = str(QtGui.QFileDialog.getSaveFileName(None,
'Save As',
'',
'*.txt'))
<CHANGES>
result = file_output.file_outputter(self.text, filename, 'OVERWRITE')
<CHANGEE>
if result[1] is not None:
QtGui.QMessageBox.information(None,
'File Output Failed',
result[1],
QtGui.QMessageBox.StandardButtons(
QtGui.QMessageBox.Ok),
QtGui.QMessageBox.Ok)
def close(self):
"""Close the window."""
self.text_display.done(0)
class GraphDisplay(Display):
"""Output a graph."""
<FILEE>
<SCANS>'',
None)
# remove the extension, if they put it
if len(file_save) > '.ly' == file_save[-3:]:
pathname = file_save[:-3]
else:
pathname = file_save
# make sure we have a (potentially) viable pathname
if len(pathname) < 1:
msg = u'LilyPond pathname is not viable.' + unicode(pathname)
self._controller.error.emit(msg)
return
# output the LilyPond file
ly_me = file_output.file_outputter(
contents=self._data[i],
pathname=pathname,
extension=extension,
overwrite=True)
# run LilyPond
if ly_me[1] is not None:
# There was an error while writing the file, so we can't continue
msg = 'File output problem in LilyPondDisplay: \n' + str(ly_me[1])
self._controller.error.emit(msg)
return
else:
OutputLilyPond.run_lilypond(ly_me[0])
self._controller.display_shown.emit()
<FILEB>
<CHANGES>
a = Address(id=12, email_address='foobar')
<CHANGEE>
<FILEE>
<FILEB>
self._test_cascade_to_pending(cascade, True)
def test_refresh_cascade_pending_orphan(self):
cascade = 'save-update, refresh-expire, delete, delete-orphan'
self._test_cascade_to_pending(cascade, False)
def test_expire_cascade_pending(self):
cascade = 'save-update, refresh-expire'
self._test_cascade_to_pending(cascade, True)
def test_refresh_cascade_pending(self):
cascade = 'save-update, refresh-expire'
self._test_cascade_to_pending(cascade, False)
@testing.resolve_artifact_names
def _test_cascade_to_pending(self, cascade, expire_or_refresh):
mapper(User, users, properties={
'addresses':relationship(Address, cascade=cascade)
})
mapper(Address, addresses)
s = create_session()
u = s.query(User).get(8)
<CHANGES>
a = Address(email_address='foobar')
<CHANGEE>
u.addresses.append(a)
if expire_or_refresh:
s.expire(u)
else:
s.refresh(u)
if "delete-orphan" in cascade:
assert a not in s
else:
assert a in s
assert a not in u.addresses
s.flush()
@testing.resolve_artifact_names
<FILEE>
<SCANS>expire(p1)
sess.expire(e1, ['status'])
sess.expire(e2)
for p in [p1, e2]:
assert 'name' not in p.__dict__
assert 'name' in e1.__dict__
assert 'status' not in e2.__dict__
assert 'status' not in e1.__dict__
e1.name = 'new engineer name'
def go():
sess.query(Person).all()
self.assert_sql_count(testing.db, go, 1)
for p in [p1, e1, e2]:
assert 'name' in p.__dict__
assert 'status' not in e2.__dict__
assert 'status' not in e1.__dict__
def go():
assert e1.name == 'new engineer name'
assert e2.name == 'engineer2'
assert e1.status == 'new engineer'
assert e2.status == 'old engineer'
self.assert_sql_count(testing.db, go, 2)
eq_(Engineer.name.get_history(e1), (['new engineer name'],(), ['engineer1']))
class ExpiredPendingTest(_fixtures.FixtureTest):
run_define_tables = 'once'
run_setup_classes = 'once'
run_setup_mappers = None
run_inserts = None
@testing.resolve_artifact_names
def test_expired_pending(self):
mapper(User, users, properties={
'addresses':relationship(Address, backref='user'),
})
mapper(Address, addresses)
sess = create_session()
a1 = Address(email_address='a1')
sess.add(a1)
sess.flush()
u1 = User(name='u1')
a1.user = u1
sess.flush()
# expire 'addresses'. backrefs
# which attach to u1 will expect to be "pending"
sess.expire(u1, ['addresses'])
# attach an Address. now its "pending"
# in user.addresses
a2 = Address(email_address='a2')
a2.user = u1
# expire u1.addresses again. this expires
# "pending" as well.
sess.expire(u1, ['addresses'])
# insert a new row
sess.execute(addresses.insert(), dict(email_address='a3', user_id=u1.id))
# only two addresses pulled from the DB, no "pending"
assert len(u1.addresses) == 2
sess.flush()
sess.expire_all()
assert len(u1.addresses) == 3
class RefreshTest(_fixtures.FixtureTest):
@testing.resolve_artifact_names
def test_refresh(self):
mapper(User, users, properties={
'addresses':relationship(mapper(Address, addresses), backref='user')
})
s = create_session()
u = s.query(User).get(7)
u.name = 'foo'
a = Address()
assert sa.orm.
<FILEB>
<CHANGES>
s = sum(time_opts.values())
<CHANGEE>
<FILEE>
<FILEB>
if (new_o.owner and
isinstance(new_o.owner.op, GpuFromHost) and
new_o.owner.inputs[0].type == o.type):
new_o = new_o.owner.inputs[0]
else:
new_o = safe_to_cpu(new_o)
new_nodes.append(new_o)
fgraph.replace_all_validate(zip(fgraph.outputs, new_nodes),
reason=self.__class__.__name__)
return (self, toposort_timing, time_opts, node_created, process_count)
@staticmethod
def print_profile(stream, prof, level=0):
(opt, toposort_timing, time_opts, node_created, process_count) = prof
blanc = (' ' * level)
print(blanc, "GraphToGPUOptimizer", end=' ', file=stream)
print(blanc, getattr(opt, "name",
getattr(opt, "__name__", "")), file=stream)
print(blanc, " time io_toposort %.3fs" % toposort_timing, file=stream)
<CHANGES>
s = sum([v for k, v in time_opts.iteritems()])
<CHANGEE>
print(blanc, "Total time taken by local optimizers %.3fs " % s, file=stream)
count_opt = []
not_used = []
not_used_time = 0
for o, count in iteritems(process_count):
if count > 0:
count_opt.append((time_opts[o], count,
node_created[o], o))
else:
not_used.append((time_opts[o], o))
not_used_time += time_opts[o]
if count_opt:
<FILEE>
<SCANS> not_used.sort(key=lambda nu: (nu[0], str(nu[1])))
for (t, o) in not_used[::-1]:
if t > 0:
# Skip opts that have 0 time; they probably weren't even tried.
print(blanc + " ", ' %.3fs - %s' % (t, o), file=stream)
print(file=stream)
@staticmethod
def merge_profile(prof1, prof2):
# (opt, toposort_timing, time_opts, node_created, process_count) = prof1
local_optimizers = OrderedSet(prof1[0].local_optimizers_all).union(
prof2[0].local_optimizers_all)
def merge_dict(d1, d2):
"""merge 2 dicts by adding the values."""
d = d1.copy()
for k, v in iteritems(d2):
if k in d:
d[k] += v
else:
d[k] = v
return d
local_optimizers_map = merge_dict(prof1[0].local_optimizers_map,
prof2[0].local_optimizers_map)
new_opt = GraphToGPU(local_optimizers, local_optimizers_map)
toposort_timing = prof1[1] + prof2[1]
time_opts = merge_dict(prof1[2], prof2[2])
node_created = merge_dict(prof1[3], prof2[3])
process_count = merge_dict(prof1[4], prof2[4])
return (new_opt,
toposort_timing,
time_opts,
node_created,
process_count)
def print_summary(self, stream=sys.stdout, level=0, depth=-1):
print("%s%s (%i)" % (
(' ' * level), self.__class__.__name__, id(self)), file=stream)
if depth != 0:
map_values = []
for opts in self.local_optimizers_map.values():
map_values += opts
for opt in self.local_optimizers_all + map_values:
opt.print_summary(stream, level=(level + 2), depth=(depth - 1))
@local_optimizer([GpuFromHost, GpuToGpu, HostFromGpu])
def local_cut_gpu_transfers(node):
# gpu[ab] -> host -> gpub
if (isinstance(node.op, GpuFromHost) and
node.inputs[0].owner and
isinstance(node
<FILEB>
<CHANGES>
return redirect(reverse('metastore:show_tables', kwargs={'database': request.COOKIES.get('hueBeeswaxLastDatabase', 'default')}))
<CHANGEE>
<FILEE>
<FILEB>
from beeswax.design import hql_query
from beeswax.models import SavedQuery, MetaInstall
from beeswax.server import dbms
from beeswax.server.dbms import get_query_server_config
from filebrowser.views import location_to_url
from metastore.forms import LoadDataForm, DbForm
from metastore.settings import DJANGO_APPS
from notebook.connectors.base import Notebook
LOG = logging.getLogger(__name__)
SAVE_RESULTS_CTAS_TIMEOUT = 300 # seconds
def check_has_write_access_permission(view_func):
"""Decorator ensuring that the user is not a read only user."""
def decorate(request, *args, **kwargs):
if not has_write_access(request.user):
raise PopupException(_('You are not allowed to modify the metastore.'), detail=_('You must have metastore:write permissions'), error_code=301)
return view_func(request, *args, **kwargs)
return wraps(view_func)(decorate)
def index(request):
<CHANGES>
return redirect(reverse('metastore:show_tables'))
<CHANGEE>
"""Database Views"""
def databases(request):
search_filter = request.GET.get('filter', '')
db = dbms.get(request.user)
databases = db.get_databases(search_filter)
return render("metastore.mako", request, {
'breadcrumbs': [],
'database': None,
'databases': databases,
'partitions': [],
'has_write_access': has_write_access(request.user),
})
<FILEE>
<SCANS> you really want to delete the database(s)?")
return render('confirm.mako', request, {'url': request.path, 'title': title})
def get_database_metadata(request, database):
db = dbms.get(request.user)
response = {'status': -1, 'data': ''}
try:
db_metadata = db.get_database(database)
response['status'] = 0
db_metadata['hdfs_link'] = location_to_url(db_metadata['location'])
response['data'] = db_metadata
except Exception, ex:
response['status'] = 1
response['data'] = _("Cannot get metadata for database: %s") % (database,)
return JsonResponse(response)
def table_queries(request, database, table):
qfilter = Q(data__icontains=table) | Q(data__icontains='%s.%s' % (database, table))
response = {'status': -1, 'queries': []}
try:
queries = [{'doc': d.to_dict(), 'data': Notebook(document=d).get_data()}
for d in Document2.objects.filter(qfilter, owner=request.user, type='query', is_history=False)[:50]]
response['status'] = 0
response['queries'] = queries
except Exception, ex:
response['status'] = 1
response['data'] = _("Cannot get queries related to table %s.%s: %s") % (database, table, ex)
return JsonResponse(response)
"""Table Views"""
def show_tables(request, database=None):
if database is None:
database = request.COOKIES.get('hueBeeswaxLastDatabase', 'default') # Assume always 'default'
db = dbms.get(request.user)
try:
databases = db.get_databases()
if database not in databases:
database = 'default'
if request.method == 'POST':
db_form = DbForm(request.POST, databases=databases)
if db_form.is_valid():
database = db_form.cleaned_data['database']
else:
db_form = DbForm(initial={'database': database}, databases=databases)
search_filter = request.GET.get('filter', '')
tables = db.get_tables_meta(database=database, table_names=search_filter) # SparkSql returns []
table_names = [table['name'] for table in tables]
except Exception, e:
raise PopupException(_('Failed to retrieve tables for database: %s' % database), detail=e)
database_meta = db.get_database(database)
if request.REQUEST.get("format", "html") == "json":
resp = JsonResponse({
'status': 0,
'database_meta':
<FILEB>
<CHANGES>
@material(blockid=68, data=[2, 3, 4, 5], transparent=True)
<CHANGEE>
<FILEE>
<FILEB>
# but since ladders can apparently be placed on transparent blocks, we
# have to render this thing anyway. same for data == 2
tex = transform_image_side(raw_texture)
composite.alpha_over(img, tex, (0,6), tex)
return generate_texture_tuple(img, blockID)
if data == 2:
tex = transform_image_side(raw_texture).transpose(Image.FLIP_LEFT_RIGHT)
composite.alpha_over(img, tex, (12,6), tex)
return generate_texture_tuple(img, blockID)
if data == 3:
tex = transform_image_side(raw_texture).transpose(Image.FLIP_LEFT_RIGHT)
composite.alpha_over(img, tex, (0,0), tex)
return generate_texture_tuple(img, blockID)
if data == 4:
tex = transform_image_side(raw_texture)
composite.alpha_over(img, tex, (12,0), tex)
return generate_texture_tuple(img, blockID)
# wall signs
<CHANGES>
@material(blockid=68, data=[2, 3, 4, 5], trasnparent=True)
<CHANGEE>
def wall_sign(blockid, data, north): # wall sign
# first north rotations
if north == 'upper-left':
if data == 2: data = 5
elif data == 3: data = 4
elif data == 4: data = 2
elif data == 5: data = 3
elif north == 'upper-right':
if data == 2: data = 3
elif data == 3: data = 2
elif data == 4: data = 5
elif data == 5: data = 4
<FILEE>
<SCANS>2),outline=(0,0,0,0),fill=(0,0,0,0))
ImageDraw.Draw(slice).rectangle((0,0,4,12),outline=(0,0,0,0),fill=(0,0,0,0))
composite.alpha_over(img, slice, (7,5))
composite.alpha_over(img, small_crop, (6,6))
composite.alpha_over(img, small_crop, (7,6))
composite.alpha_over(img, slice, (7,7))
return img
# fire
@material(blockid=51, data=range(16), transparent=True)
def fire(blockid, data):
firetexture = _load_image("fire.png")
side1 = transform_image_side(firetexture)
side2 = transform_image_side(firetexture).transpose(Image.FLIP_LEFT_RIGHT)
img = Image.new("RGBA", (24,24), bgcolor)
composite.alpha_over(img, side1, (12,0), side1)
composite.alpha_over(img, side2, (0,0), side2)
composite.alpha_over(img, side1, (0,6), side1)
composite.alpha_over(img, side2, (12,6), side2)
return img
# monster spawner
block(blockid=52, top_index=34)
# wooden, cobblestone, red brick, stone brick and netherbrick stairs.
@material(blockid=[53,67,108,109,114], data=range(4), transparent=True)
def stairs(blockid, data, north):
# first, north rotations
if north == 'upper-left':
if data == 0: data = 2
elif data == 1: data = 3
elif data == 2: data = 1
elif data == 3: data = 0
elif north == 'upper-right':
if data == 0: data = 1
elif data == 1: data = 0
elif data == 2: data = 3
elif data == 3: data = 2
elif north == 'lower-right':
if data == 0: data = 3
elif data == 1: data = 2
elif data == 2: data = 0
elif data == 3: data = 1
if blockid == 53: # wooden
texture = terrain_images[4]
<FILEB>
<CHANGES>
conn = server.PulpConnection('host', verify_ssl=False)
<CHANGEE>
<FILEE>
<FILEB>
@mock.patch('pulp.bindings.server.httpslib.HTTPSConnection.request')
def test_request_handles_untrusted_server_cert(self, request):
"""Test the request() method when the server is using a certificate that is not signed by a"""
"""trusted certificate authority."""
conn = server.PulpConnection('host')
wrapper = server.HTTPSServerWrapper(conn)
# Let's raise the SSLError with the right string to count as a certificate problem
request.side_effect = SSL.SSLError('oh nos certificate verify failed can you believe it?')
self.assertRaises(exceptions.CertificateVerificationException, wrapper.request, 'GET',
'/awesome/api/', '')
@mock.patch('pulp.bindings.server.httpslib.HTTPSConnection.getresponse')
@mock.patch('pulp.bindings.server.httpslib.HTTPSConnection.request')
@mock.patch('pulp.bindings.server.SSL.Context.__init__',
side_effect=server.SSL.Context.__init__, autospec=True)
@mock.patch('pulp.bindings.server.SSL.Context.set_options', autospec=True)
def test_request_refuses_ssl(self, set_options, Context, request, getresponse):
"""Assert that request() configures m2crypto to refuse to do SSLv2.0 and SSLv3.0."""
"""https://bugzilla.redhat.com/show_bug.cgi?id=1153054"""
<CHANGES>
conn = server.PulpConnection('host', validate_ssl_ca=False)
<CHANGEE>
wrapper = server.HTTPSServerWrapper(conn)
status, body = wrapper.request('GET', '/awesome/api/', '')
ssl_context = Context.mock_calls[0][1][0]
# Don't let the name of this argument scare you. Despite its misleading name, this means
# that we are willing to do any protocol supported by the openssl installation on this box.
Context.assert_called_once_with(ssl_context, 'sslv23')
# set_options gets called twice. The Context.__init__ calls it with defaults, and then we
# call it again to tell it to not do SSLv2 or SSLv<SCANS> raised, and it was not.')
except exceptions.MissingCAPathException as e:
self.assertEqual(e.args[0], ca_path)
except:
self.fail('The wrong exception type was raised!')
@mock.patch('os.path.isdir', return_value=True)
@mock.patch('pulp.bindings.server.httpslib.HTTPSConnection.getresponse')
@mock.patch('pulp.bindings.server.httpslib.HTTPSConnection.request')
@mock.patch('pulp.bindings.server.SSL.Context.load_verify_locations')
@mock.patch('pulp.bindings.server.SSL.Context.set_verify')
def test_request_with_ca_path_to_dir(self, set_verify, load_verify_locations, request,
getresponse, isdir):
"""Test the request() method when the connection's ca_path setting points to a directory."""
ca_path = '/path/to/an/existing/dir/'
conn = server.PulpConnection('host', verify_ssl=True, ca_path=ca_path)
wrapper = server.HTTPSServerWrapper(conn)
class FakeResponse(object):
"""This class is used to fake the response from httpslib."""
def read(self):
return '{}'
status = 200
getresponse.return_value = FakeResponse()
status, body = wrapper.request('GET', '/awesome/api/', '')
self.assertEqual(status, 200)
self.assertEqual(body, {})
# Make sure the SSL settings are correct
set_verify.assert_called_once_with(SSL.verify_peer, depth=100)
load_verify_locations.assert_called_once_with(capath=ca_path)
@mock.patch('os.path.isfile', return_value=True)
@mock.patch('pulp.bindings.server.httpslib.HTTPSConnection.getresponse')
@mock.patch('pulp.bindings.server.httpslib.HTTPSConnection.request')
@mock.patch('pulp.bindings.server.SSL.Context.load_verify_locations')
@mock.patch('pulp.bindings.server.SSL.Context.set_verify')
<FILEB>
<CHANGES>
ignore_topo = (B.HostFromGpu, B.GpuFromHost, theano.compile.DeepCopyOp)
<CHANGEE>
<FILEE>
<FILEB>
self.floatX = "float32"
# In FAST_COMPILE mode, we force the FAST_RUN mode for optimization.
self.hide_error = theano.config.mode not in ['DebugMode', 'DEBUG_MODE']
self.shared = cuda.shared_constructor
import theano.tensor.tests.test_subtensor
# This is to avoid duplicating tests.
class T_subtensor(theano.tensor.tests.test_subtensor.T_subtensor):
# This prevents nose from printing method docstrings instead of method
# names
def shortDescription(self):
return None
shared = staticmethod(cuda.shared_constructor)
sub = cuda.GpuSubtensor
inc_sub = cuda.GpuIncSubtensor
adv_sub1 = cuda.GpuAdvancedSubtensor1
adv_incsub1 = cuda.GpuAdvancedIncSubtensor1
mode = mode_with_gpu
dtype = 'float32'
<CHANGES>
ignore_topo = (B.HostFromGpu, B.GpuFromHost)
<CHANGEE>
fast_compile = False
ops = (cuda.GpuSubtensor, cuda.GpuIncSubtensor,
cuda.GpuAdvancedSubtensor1, cuda.GpuAdvancedIncSubtensor1)
def __init__(self, name):
return super(theano.tensor.tests.test_subtensor.T_subtensor,
self).__init__(name)
def test_adv_sub1_fast(self):
"""We check that the special cases of advanced indexing that"""
"""use CudaNdarrayTakeFrom are handled correctly"""
rand = numpy.random.rand
# The variable fast is used to set the member perform_using_take of
# the Op. It is only useful for testing that we use the fast
<FILEE>
<SCANS>import time
import unittest
from theano.compile.pfunc import pfunc
from theano import tensor
import numpy
import theano
import theano.tensor as T
# Skip test if cuda_ndarray is not available.
from nose.plugins.skip import SkipTest
import theano.sandbox.cuda as cuda_ndarray
if cuda_ndarray.cuda_available == False:
raise SkipTest('Optional package cuda disabled')
from theano.gof.python25 import any
import theano.sandbox.cuda as tcn
import theano.sandbox.cuda as cuda
import theano.sandbox.cuda.basic_ops as B
from theano.tensor.basic import _allclose
from theano.tests import unittest_tools as utt
if theano.config.mode == 'FAST_COMPILE':
mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu')
mode_without_gpu = theano.compile.mode.get_mode('FAST_RUN').excluding('gpu')
else:
mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')
mode_without_gpu = theano.compile.mode.get_default_mode().excluding('gpu')
def rand_cuda_ndarray(shape):
return cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
dtype='float32'))
#intentionally disabled
def tes_use():
tcn.use()
def tensor_pattern_to_gpu_pattern(shape, pattern):
gpu_pattern = [0 for elem in shape]
for idx in pattern:
gpu_pattern[idx] = 1
gpu_pattern = tuple(gpu_pattern)
return gpu_pattern
def test_careduce():
"""test sum pattern 1, 11, 10, 01, 001, 010, 100, 110, 011, 111,"""
"""0011, 0101, 0111, 1011, 1111"""
"""test sum pattern implemented with reshape:"""
"""1000, 0100, 0010, 0001, 11111"""
"""others implemented by reshape that are not tested"""
"""0011,0101,0110,1001,1010,1100"""
"""1110,1101,1011"""
"""TODO: test with broadcast"""
for scalar_op, careduce_op in [
(theano.scalar.mul, tensor.
<FILEB>
<CHANGES>
st = day.astimezone(timezone('UTC')).replace(hour=self.getStartDate().hour, minute=self.getStartDate().minute)
<CHANGEE>
<FILEE>
<FILEB>
entries = self.getEntriesOnDay(day)
if type=="duration":
i=0
while i<len(entries):
entry=entries[i]
if doFit:
if isinstance( entry.getOwner(), SessionSlot ) :
entry.getOwner().fit()
if i+1 == len(entries):
dur=entry.getDuration()
else:
nextentry=entries[i+1]
dur=nextentry.getStartDate()-entry.getStartDate()-diff
if dur<timedelta(0):
raise EntryTimingError( _("""With the time between entries you've chosen, the entry "%s" will have a duration less than zero minutes. Please, choose another time""")%entry.getTitle())
entry.setDuration(dur=dur, check=2)
i+=1
elif type=="startingTime":
<CHANGES>
st = timezone('UTC').localize(datetime(day.year, day.month, day.day, self.getStartDate().hour, self.getStartDate().minute))
<CHANGEE>
for entry in entries:
if doFit:
if isinstance( entry.getOwner(), SessionSlot ) :
entry.getOwner().fit()
entry.setStartDate(st, check=2, moveEntries=1)
st=entry.getEndDate()+diff
elif type=="noAction" and doFit:
for entry in entries:
if isinstance( entry.getOwner(), SessionSlot ) :
entry.getOwner().fit()
class SessionSchedule(TimeSchedule):
def __init__(self,session):
<FILEE>
<SCANS> values["roomName"] = self.getOwnRoom().getName()
else :
values["roomName"] = ""
values["backgroundColor"] = self.getColor()
values["textColor"] = self.getTextColor()
if self.isTextColorToLinks():
values["textcolortolinks"]="True"
return values
def setValues( self, data, check=2, moveEntriesBelow=0, tz='UTC'):
from MaKaC.conference import CustomLocation, CustomRoom
# In order to move the entries below, we need to know the diff (we have to move them)
# and the list of entries to move. This data has to be taken in advance because it
# is going to be modified before the move.
if moveEntriesBelow == 1 and self.getSchedule():
oldStartDate=copy.copy(self.getStartDate())
oldDuration=copy.copy(self.getDuration())
i=self.getSchedule().getEntries().index(self)+1
entriesList = self.getSchedule().getEntries()[i:]
if data.get("startDate", None) != None:
self.setStartDate(data["startDate"], 0)
elif data.get("sYear", None) != None and \
data.get("sMonth", None) != None and \
data.get("sDay", None) != None and \
data.get("sHour", None) != None and \
data.get("sMinute", None) != None:
#########################################
# Fermi timezone awareness #
# We have to store as UTC, relative #
# to the timezone of the conference. #
#########################################
d = timezone(tz).localize(datetime(int(data["sYear"]),
int(data["sMonth"]),
int(data["sDay"]),
int(data["sHour"]),
int(data["sMinute"])))
sDate = d.astimezone(timezone('UTC'))
self.setStartDate(sDate)
########################################
# Fermi timezone awareness #
# We have to store as UTC, relative #
# to the timezone of the conference. #
########################################
if data.get("durTimedelta", None) != None:
self.setDuration(check=0, dur=data["durTimedelta"])
elif data.get("durHours","").strip()!="" and data.get("durMins","").strip()!="":
self.setDuration(data["durHours"], data["durMins"], 0)
else:
h=data.get("durHours","").strip()
m=data.get("durMins","").strip()
force=False
if h!="" or m!="":
h=h or "0"
m=m or "0"
if h!="0" or m!="0":
self.setDuration(int(h), int(m), 0)
else:
force=True
else:
force=True
if force:
if self.getDuration() is
<FILEB>
<CHANGES>
EnsureSiteDirectory(SITE_DIR)
<CHANGEE>
<FILEE>
<FILEB>
try:
return os.stat(path).st_mtime
except: # pylint: disable=W0702
# This error is different depending on the OS, hence no specified type.
return 0
def _SiteDirectoryIsUpToDate():
return _GetModTime(LAST_ROLLED) > _GetModTime(__file__)
def UpdateSiteDirectory():
"""Installs the packages from PACKAGES if they are not already installed."""
"""At this point we must have setuptools in the site directory."""
"""This is intended to be run in a subprocess *prior* to the site directory"""
"""having been added to the parent process as it may cause packages to be"""
"""added and/or removed."""
"""Returns:"""
"""True on success, False otherwise."""
if _SiteDirectoryIsUpToDate():
return True
try:
<CHANGES>
AddSiteDirectory(SITE_DIR)
<CHANGEE>
import pkg_resources
# Determine if any packages actually need installing.
missing_packages = []
for package in [SETUPTOOLS] + list(PACKAGES):
pkg = Package(*package)
req = pkg.GetAsRequirementString()
# It may be that this package is already available in the site
# directory. If so, we can skip past it without trying to install it.
pkg_req = pkg_resources.Requirement.parse(req)
try:
dist = pkg_resources.working_set.find(pkg_req)
if dist:
<FILEE>
<SCANS> ...'
EnsureSiteDirectory(SITE_DIR)
# Download the egg to a temp directory.
dest_dir = tempfile.mkdtemp('depot_tools')
path = None
try:
package = Package(*SETUPTOOLS)
print ' Downloading %s ...' % package.GetFilename()
path = package.DownloadEgg(dest_dir)
except Error:
print ' Download failed!'
shutil.rmtree(dest_dir)
return False
try:
# Load the downloaded egg, and install it to the site directory. Do this
# in a subprocess so as not to pollute this runtime.
pycode = '_LoadSetupToolsFromEggAndInstall(%s)' % repr(path)
if not _RunInSubprocess(pycode):
raise Error()
# Reload our site directory, which should now contain setuptools.
AddSiteDirectory(SITE_DIR)
# Try to import setuptools
import setuptools
except ImportError:
print ' Unable to import setuptools!'
return False
except Error:
# This happens if RunInSubProcess fails, and the appropriate error has
# already been written to stdout.
return False
finally:
# Delete the temp directory.
shutil.rmtree(dest_dir)
return True
def _GetModTime(path):
"""Gets the last modification time associated with |path| in seconds since"""
"""epoch, returning 0 if |path| does not exist."""
continue
except pkg_resources.VersionConflict:
# This happens if another version of the package is already
# installed in another site directory (ie: the system site directory).
pass
missing_packages.append(pkg)
# Install the missing packages.
if missing_packages:
print 'Updating python packages ...'
for pkg in missing_packages:
print ' Installing %s ...' % pkg.GetFilename()
InstallPackage(pkg.GetAsRequirementString(), SITE_DIR)
# Touch the status file so we know that we're up to date next time.
open(LAST_ROLLED, 'wb')
except InstallError, e:
print ' Installation failed: %s' % str(e)
return False
return True
def SetupSiteDirectory():
"""Sets up the site directory, bootstrapping setuptools if necessary."""
"""If this finishes successfully then SITE_DIR will exist and will contain"""
"""the appropriate version of setuptools and all of the packages listed in"""
"""PACKAGES."""
"""This is the main workhorse of this module. Calling this will do everything"""
"""necessary to ensure that you have the desired packages installed in the"""
"""site directory, and the site directory enabled in this process."""
"""Returns:"""
"""True on success, False on failure."""
if _SiteDirectoryIsUpToDate():
AddSiteDirectory(SITE_DIR)
return True
if not _RunInSubprocess('BootstrapSetupTools()'):
return False
if not _RunInSubprocess('UpdateSiteDirectory()'):
return False
# Process the site directory so that the packages within it are available
# for import.
AddSiteDirectory(SITE_DIR)
return True
def Can
<FILEB>
<CHANGES>
if retVal.upper() in kb.keywords or not re.match(r"\A[A-Za-z0-9_@%s\$]+\Z" % ("." if _ else ""), retVal): # MsSQL is the only DBMS where we automatically prepend schema to table name (dot is normal)
<CHANGEE>
<FILEE>
<FILEB>
kb.reflectiveMechanism = False
if not suppressWarning:
debugMsg = "turning off reflection removal mechanism (for optimization purposes)"
logger.debug(debugMsg)
return retVal
def normalizeUnicode(value):
"""Does an ASCII normalization of unicode strings"""
"""Reference: http://www.peterbe.com/plog/unicode-to-ascii"""
return unicodedata.normalize('NFKD', value).encode('ascii', 'ignore') if isinstance(value, unicode) else value
def safeSQLIdentificatorNaming(name, isTable=False):
"""Returns a safe representation of SQL identificator name (internal data format)"""
"""Reference: http://stackoverflow.com/questions/954884/what-special-characters-are-allowed-in-t-sql-column-retVal"""
retVal = name
if isinstance(name, basestring):
retVal = getUnicode(name)
_ = isTable and Backend.getIdentifiedDbms() in (DBMS.MSSQL, DBMS.SYBASE)
if _:
retVal = re.sub(r"(?i)\A%s\." % DEFAULT_MSSQL_SCHEMA, "", retVal)
<CHANGES>
if not re.match(r"\A[A-Za-z0-9_@%s\$]+\Z" % ("." if _ else ""), retVal): # MsSQL is the only DBMS where we automatically prepend schema to table name (dot is normal)
<CHANGEE>
if Backend.getIdentifiedDbms() in (DBMS.MYSQL, DBMS.ACCESS):
retVal = "`%s`" % retVal.strip("`")
elif Backend.getIdentifiedDbms() in (DBMS.ORACLE, DBMS.PGSQL, DBMS.DB2):
retVal = "\"%s\"" % retVal.strip("\"")
elif Backend.getIdentifiedDbms() in (DBMS.MSSQL,):
retVal = "[%s]" % retVal.strip("[]")
if _ and DEFAULT_MSSQL_SCHEMA not in retVal and '.' not in re.sub(r"\[[^]]+\]", "", retVal):
retVal = "%s.%s" % (DEFAULT_MSSQL_SCHEMA, retVal)
return retVal
def unsafeSQLIdentificatorNaming(name):
"""Extracts identificator<SCANS>://bugs.python.org/issue2275"""
retVal = None
if request and name:
retVal = max(request.get_header(_) if name.upper() == _.upper() else None for _ in request.headers.keys())
return retVal
def isNumber(value):
"""Returns True if the given value is a number-like object"""
try:
float(value)
except:
return False
else:
return True
def zeroDepthSearch(expression, value):
"""Searches occurrences of value inside expression at 0-depth level"""
"""regarding the parentheses"""
retVal = []
depth = 0
for index in xrange(len(expression)):
if expression[index] == '(':
depth += 1
elif expression[index] == ')':
depth -= 1
elif depth == 0 and expression[index:index + len(value)] == value:
retVal.append(index)
return retVal
def splitFields(fields, delimiter=','):
"""Returns list of fields splitted by delimiter"""
fields = fields.replace("%s " % delimiter, delimiter)
commas = [-1, len(fields)]
commas.extend(zeroDepthSearch(fields, ','))
commas = sorted(commas)
return [fields[x + 1:y] for (x, y) in zip(commas, commas[1:])]
def pollProcess(process, suppress_errors=False):
while True:
dataToStdout(".")
time.sleep(1)
returncode = process.poll()
if returncode is not None:
if not suppress_errors:
if returncode == 0:
dataToStdout(" done\n")
elif returncode < 0:
dataToStdout(" process terminated by signal %d\n" % returncode)
elif returncode > 0:
dataToStdout(" quit unexpectedly with return code %d\n" % returncode)
break
<FILEB>
<CHANGES>
EnsureSiteDirectory(SITE_DIR)
<CHANGEE>
<FILEE>
<FILEB>
try:
return os.stat(path).st_mtime
except: # pylint: disable=W0702
# This error is different depending on the OS, hence no specified type.
return 0
def _SiteDirectoryIsUpToDate():
return _GetModTime(LAST_ROLLED) > _GetModTime(__file__)
def UpdateSiteDirectory():
"""Installs the packages from PACKAGES if they are not already installed."""
"""At this point we must have setuptools in the site directory."""
"""This is intended to be run in a subprocess *prior* to the site directory"""
"""having been added to the parent process as it may cause packages to be"""
"""added and/or removed."""
"""Returns:"""
"""True on success, False otherwise."""
if _SiteDirectoryIsUpToDate():
return True
try:
<CHANGES>
AddSiteDirectory(SITE_DIR)
<CHANGEE>
import pkg_resources
# Determine if any packages actually need installing.
missing_packages = []
for package in [SETUPTOOLS] + list(PACKAGES):
pkg = Package(*package)
req = pkg.GetAsRequirementString()
# It may be that this package is already available in the site
# directory. If so, we can skip past it without trying to install it.
pkg_req = pkg_resources.Requirement.parse(req)
try:
dist = pkg_resources.working_set.find(pkg_req)
if dist:
<FILEE>
<SCANS>"""
"""after this returns. If modules are added or deleted this must be called"""
"""again for the changes to be reflected in the runtime."""
"""This calls both AddToPythonPath and site.addsitedir. Both are needed to"""
"""convince easy_install to treat |path| as a site directory."""
AddToPythonPath(path)
site.addsitedir(path) # pylint: disable=E1101
def EnsureSiteDirectory(path):
"""Creates and/or adds the provided path to the runtime as a site directory."""
"""This works like AddSiteDirectory but it will create the directory if it"""
"""does not yet exist."""
"""Raise:"""
"""Error: if the site directory is unable to be created, or if it exists and"""
"""is not a directory."""
if os.path.exists(path):
if not os.path.isdir(path):
raise Error('Path is not a directory: %s' % path)
else:
try:
os.mkdir(path)
except IOError:
raise Error('Unable to create directory: %s' % path)
AddSiteDirectory(path)
def ModuleIsFromPackage(module, package_path):
"""Determines if a module has been imported from a given package."""
"""Args:"""
"""module: the module to test."""
"""package_path: the path to the package to test."""
"""Returns:"""
"""True if |module| has been imported from |package_path|, False otherwise."""
try:
m = os.path.abspath(module.__file__)
p = os.path.abspath(package_path)
if len(m) <= len(p):
return False
if m[0:len(p)] != p:
return False
return m[len(p)] == os.sep
except AttributeError:
return False
def _CaptureStdStreams(function, *args, **kwargs):
"""Captures stdout and stderr while running the provided function."""
"""This only works if |function| only accesses sys.stdout and sys.stderr. If"""
"""we need more than this we'll have to use subprocess.Popen."""
"""Args:"""
"""function: the function to be called."""
"""args: the arguments to pass to |function|."""
"""kwargs: the keyword arguments to pass to |function|."""
orig_stdout = sys.stdout
orig_stderr = sys.stderr
sys.stdout = cStringIO.StringIO()
sys.stderr = cStringIO.StringIO()
try:
return function(*args, **kwargs)
finally:
sys.stdout = orig_stdout
sys.stderr = orig_stderr
def InstallPackage(url_or_req, site_dir):
"""Installs a package to a site directory."""
"""|site_dir| must exist and already be an active site directory. setuptools"""
"""must in the path. Uses easy_install which may involve a download from"""
"""pypi.python.org, so this also requires network access."""
"""Args:"""
"""url_or_req: the package to install, expressed as an URL (may be local
<FILEB>
<CHANGES>
status=status.HTTP_409_CONFLICT)
<CHANGEE>
<FILEE>
<FILEB>
return Response({'error_message': 'Email is not well-formed'}, status=status.HTTP_400_BAD_REQUEST)
except ChUser.DoesNotExist:
if 'password' in request.data:
authenticated_user = authenticate(username=user.username, password=request.data['password'])
if authenticated_user is not None and user == authenticated_user:
# TODO: shouldn't the add_email get a user instead of a profile??
new_email = EmailAddress.objects.add_email(user=user.profile, email=request.data['new_email'])
new_email.set_as_primary()
new_email.save() # TODO: this save might not be necessary
user.email = request.data['new_email']
user.save()
else:
# the authentication system was unable to verify the username and password
return Response(status=status.HTTP_401_UNAUTHORIZED)
else:
return Response({'error_message': 'Password is not present'}, status=status.HTTP_400_BAD_REQUEST)
else:
return Response({'error_message': 'There is already a registered user for this email address'},
<CHANGES>
status=status.HTTP_400_BAD_REQUEST)
<CHANGEE>
else:
return Response({'error_message': 'Email is not present'}, status=status.HTTP_400_BAD_REQUEST)
class UsernameCheckAndGet(APIView):
permission_classes = (permissions.IsAuthenticatedForPutOrGet, permissions.CanGetUsername)
def get_object(self, email):
try:
return ChUser.objects.get(email=email)
except ChUser.DoesNotExist:
raise Http404
def validate_public_name(self, public_name):
if re.match(r'^[0-9a-zA-Z_]{1,20}$', public_name):
return
<FILEE>
<SCANS>objects.get(slug=hive_slug, deleted=False)
except ChHive.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
try:
hive.join(profile)
except IntegrityError:
return Response({'error_message': 'The user was already subscribed to the hive'},
status=status.HTTP_409_CONFLICT)
except UnauthorizedException:
return Response({'error_message': 'The user is expelled from the hive'},
status=status.HTTP_401_UNAUTHORIZED)
# Because I don't want Django Rest Framework to treat it as a serializer in this case, I cast it to a dict
hive_info = dict(serializers.ChHiveSerializer(hive).data)
return Response(hive_info, status=status.HTTP_200_OK)
class ChProfileHiveDetail(APIView):
"""API method: Hive list"""
permission_classes = (permissions.IsAuthenticated, permissions.CanGetHiveList)
def get_object(self, public_name):
try:
return ChProfile.objects.select_related().get(public_name=public_name)
except ChProfile.DoesNotExist:
raise Http404
def delete(self, request, public_name, hive_slug, format=None):
profile = self.get_object(public_name)
try:
# If the user is requesting a join with his own profile then we go on
self.check_object_permissions(self.request, profile)
except PermissionDenied:
return Response(status=status.HTTP_403_FORBIDDEN)
except NotAuthenticated:
return Response(status=status.HTTP_403_FORBIDDEN)
if hive_slug == '':
return Response(status=status.HTTP_400_BAD_REQUEST)
# We get the hive for this hive_slug
try:
hive = ChHive.objects.get(slug=hive_slug, deleted=False)
except ChHive.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
try:
hive.leave(profile)
except IntegrityError:
return Response({'error_message': 'User has not joined the hive'},
status=status.HTTP_409_CONFLICT)
return Response(status=status.HTTP_200_OK)
class ChProfileChatList(APIView):
permission_classes = (permissions.IsAuthenticated, permissions.CanGetChatList)
def get_object(self, public_name):
<FILEB>
<CHANGES>
with self.assertRaises(RuntimeError):
<CHANGEE>
<FILEE>
<FILEB>
pm.initialize()
pm.finalize()
def test_run(self):
mod = self.module()
fn = mod.get_function("sum")
pm = self.pm(mod)
self.pmb().populate(pm)
mod.close()
orig_asm = str(fn)
pm.initialize()
pm.run(fn)
pm.finalize()
opt_asm = str(fn)
# Quick check that optimizations were run
self.assertIn("%.4", orig_asm)
self.assertNotIn("%.4", opt_asm)
class TestDylib(BaseTest):
def test_bad_library(self):
<CHANGES>
with self.assertRaises(Exception):
<CHANGEE>
dylib.load_library_permanently("zzzasdkf;jasd;l")
@unittest.skipUnless(platform.system() in ["Linux", "Darwin"],
"Unsupport test for current OS")
def test_libm(self):
system = platform.system()
if system == "Linux":
libm = find_library("m")
elif system == "Darwin":
libm = find_library("libm")
dylib.load_library_permanently(libm)
if __name__ == "__main__":
unittest.main()
<FILEE>
<SCANS>_triple(self):
mod = self.module()
s = mod.triple
self.assertEqual(s, llvm.get_default_triple())
mod.triple = ''
self.assertEqual(mod.triple, '')
def test_verify(self):
# Verify successful
mod = self.module()
self.assertIs(mod.verify(), None)
# Verify failed
mod = self.module(asm_verification_fail)
with self.assertRaises(RuntimeError) as cm:
mod.verify()
s = str(cm.exception)
self.assertIn("%.bug = add i32 1, %.bug", s)
def test_get_function(self):
mod = self.module()
fn = mod.get_function("sum")
self.assertIsInstance(fn, llvm.ValueRef)
self.assertEqual(fn.name, "sum")
with self.assertRaises(NameError):
mod.get_function("foo")
# Check that fn keeps the module instance alive
del mod
str(fn.module)
def test_get_global_variable(self):
mod = self.module()
gv = mod.get_global_variable("glob")
self.assertIsInstance(gv, llvm.ValueRef)
self.assertEqual(gv.name, "glob")
with self.assertRaises(NameError):
mod.get_global_variable("bar")
# Check that gv keeps the module instance alive
del mod
str(gv.module)
def test_global_variables(self):
mod = self.module()
it = mod.global_variables
del mod
globs = sorted(it, key=lambda value: value.name)
self.assertEqual(len(globs), 3)
self.assertEqual(globs[0].name, "glob")
self.assertEqual(globs[1].name, "glob_f")
self.assertEqual(globs[2].name, "glob_struct")
def test_functions(self):
mod = self.module()
it = mod.functions
del mod
funcs = list(it)
self.assertEqual(len(funcs), 1)
self.assertEqual(funcs[0].name, "sum")
def test_link_in(self):
dest = self.module()
src = self.module(asm_mul)
dest.link_in(src)
self.assertEqual(sorted(f.name for f in dest.functions), ["mul", "sum"])
dest.get_function("mul")
dest.close()
with self.assertRaises(ctypes.ArgumentError):
src.get_function("mul")
def test_link_in_preserve(self):
dest = self.module()
src2 = self.module(asm_mul)
dest.link_in(src2, preserve=True)
self.assertEqual(sorted(f.name for f in dest.functions), ["mul", "sum"])
dest.close()
src2.get_function("mul")
def test_link_in_error(self):
# Raise an error by trying to link two modules with the same global
# definition "sum".
dest = self.module()
src = self.module(asm_sum2)
with self.assertRaises(
<FILEB>
<CHANGES>
X509Extension(b('basicConstraints'), True, b('CA:false'))])
<CHANGEE>
<FILEE>
<FILEB>
request = X509Req()
subject = request.get_subject()
self.assertTrue(
isinstance(subject, X509NameType),
"%r is of type %r, should be %r" % (subject, type(subject), X509NameType))
subject.commonName = "foo"
self.assertEqual(request.get_subject().commonName, "foo")
del request
subject.commonName = "bar"
self.assertEqual(subject.commonName, "bar")
def test_get_subject_wrong_args(self):
request = X509Req()
self.assertRaises(TypeError, request.get_subject, None)
def test_add_extensions(self):
"""L{X509Req.add_extensions} accepts a C{list} of L{X509Extension}"""
"""instances and adds them to the X509 request."""
request = X509Req()
request.add_extensions([
<CHANGES>
X509Extension('basicConstraints', True, 'CA:false')])
<CHANGEE>
# XXX Add get_extensions so the rest of this unit test can be written.
def test_add_extensions_wrong_args(self):
"""L{X509Req.add_extensions} raises L{TypeError} if called with the wrong"""
"""number of arguments or with a non-C{list}. Or it raises L{ValueError}"""
"""if called with a C{list} containing objects other than L{X509Extension}"""
"""instances."""
request = X509Req()
self.assertRaises(TypeError, request.add_extensions)
self.assertRaises(TypeError, request.add_extensions, object())
self.assertRaises(ValueError, request.add_extensions, [object()])
self.assertRaises(TypeError, request.add_extensions, [], None)
class X509Tests(TestCase, _PKeyInteractionTestsMixin):
<FILEE>
<SCANS> datetime.utcnow() + timedelta(seconds=100)
cert.gmtime_adj_notBefore(100)
self.assertEqual(cert.get_notBefore(), b(now.strftime("%Y%m%d%H%M%SZ")))
def test_gmtime_adj_notAfter_wrong_args(self):
"""L{X509Type.gmtime_adj_notAfter} raises L{TypeError} if called with the"""
"""wrong number of arguments or a non-C{int} argument."""
cert = X509()
self.assertRaises(TypeError, cert.gmtime_adj_notAfter)
self.assertRaises(TypeError, cert.gmtime_adj_notAfter, None)
self.assertRaises(TypeError, cert.gmtime_adj_notAfter, 123, None)
def test_gmtime_adj_notAfter(self):
"""L{X509Type.gmtime_adj_notAfter} changes the not-after timestamp to be"""
"""the current time plus the number of seconds passed in."""
cert = load_certificate(FILETYPE_PEM, self.pemData)
now = datetime.utcnow() + timedelta(seconds=100)
cert.gmtime_adj_notAfter(100)
self.assertEqual(cert.get_notAfter(), b(now.strftime("%Y%m%d%H%M%SZ")))
def test_has_expired_wrong_args(self):
"""L{X509Type.has_expired} raises L{TypeError} if called with any"""
"""arguments."""
cert = X509()
self.assertRaises(TypeError, cert.has_expired, None)
def test_has_expired(self):
"""L{X509Type.has_expired} returns C{True} if the certificate's not-after"""
"""time is in the past."""
cert = X509()
cert.gmtime_adj_notAfter(-1)
self.assertTrue(cert.has_expired())
def test_has_not_expired(self):
"""L{X509Type.has_expired} returns C{False} if the certificate's not-after"""
"""time is in the future."""
cert = X509()
cert.gmtime_adj_notAfter(2)
self.assertFalse(cert.has_expired())
def test_digest(self):
"""L{X509.digest} returns a string giving ":"-separated hex-encoded words"""
"""of the digest of the certificate
<FILEB>
<CHANGES>
result = create_submit_request(apiurl, project, p, src_update=src_update)
<CHANGEE>
<FILEE>
<FILEB>
if t:
if target_project == None:
target_project = t
if len(root.findall('entry')) > 1: # This is not really correct, but should work mostly
# Real fix is to ask the api if sources are modified
# but there is no such call yet.
print("Submitting package ", p)
else:
print(" Skipping not modified package ", p)
continue
else:
print("Skipping package ", p, " since it is a source link pointing inside the project.")
continue
# check for failed source service
_check_service(root)
# submitting this package
if opts.separate_requests or opts.seperate_requests:
# create a single request
<CHANGES>
result = create_submit_request(apiurl, project, p)
<CHANGEE>
if not result:
sys.exit("submit request creation failed")
sr_ids.append(result)
else:
s = """<action type="submit"> <source project="%s" package="%s" /> <target project="%s" package="%s" /> %s </action>""" % \
(project, p, t, p, options_block)
actionxml += s
if actionxml != "":
xml = """<request> %s <state name="new"/> <description>%s</description> </request> """ % \
(actionxml, cgi.escape(opts.message or ""))
u = makeurl(apiurl, ['request'], query='cmd=create&addrevision=1')
f = http_POST(u, data=xml)
<FILEE>
<SCANS>.')
@cmdln.alias("sr")
@cmdln.alias("submitreq")
@cmdln.alias("submitpac")
def do_submitrequest(self, subcmd, opts, *args):
"""${cmd_name}: Create request to submit source into another Project"""
"""[See http://en.opensuse.org/openSUSE:Build_Service_Collaboration for information"""
"""on this topic.]"""
"""See the "request" command for showing and modifing existing requests."""
"""usage:"""
"""osc submitreq [OPTIONS]"""
"""osc submitreq [OPTIONS] DESTPRJ [DESTPKG]"""
"""osc submitreq [OPTIONS] SOURCEPRJ SOURCEPKG DESTPRJ [DESTPKG]"""
"""osc submitpac ... is a shorthand for osc submitreq --cleanup ..."""
"""${cmd_option_list}"""
def _check_service(root):
serviceinfo = root.find('serviceinfo')
if serviceinfo is not None:
# code "running" is ok, because the api will choke when trying
# to create the sr (if it is still running)
if serviceinfo.get('code') not in ('running', 'succeeded'):
print('A service run for package %s %s:'
% (root.get('name'), serviceinfo.get('code')),
file=sys.stderr)
error = serviceinfo.find('error')
if error is not None:
print('\n'.join(error.text.split('\\n')))
sys.exit('\nPlease fix this first')
if opts.cleanup and opts.no_cleanup:
raise oscerr.WrongOptions('\'--cleanup\' and \'--no-cleanup\' are mutually exclusive')
if opts.seperate_requests:
# compatibility option will be removed in the future
print('--seperate-requests is deprecated (use '
'--separate-requests)', file=sys.stderr)
src_update = conf.config['submitrequest_on_accept_action'] or None
# we should check here for home:<id>:branch and default to update, but that would require OBS 1.7 server
if subcmd == 'submitpac' and not opts.no_cleanup:
opts.cleanup = True
if opts.cleanup:
src_update = "cleanup"
elif opts.no_cleanup:
src_update = "update"
elif opts.no_update:
src_update = "noupdate"
myreqs = []
if opts.supersede:
myreqs = [opts.supersede]
args = slash_split(args)
# remove this block later again
oldcmds = ['create', 'list', 'log', 'show', 'decline', 'accept', 'delete', 'revoke']
if args and args[0] in oldcmds:
print("************************************************************************", file=sys.stderr)
print("* WARNING: It looks that you are using this command with a *", file=sys.stderr)
print("* deprecated
<FILEB>
<CHANGES>
tp_query = project.translationproject_set.live() \
<CHANGEE>
<FILEE>
<FILEB>
self.languages = options.pop('languages', [])
# info start
start = datetime.datetime.now()
logging.info('Start running of %s', self.name)
self.handle_all(**options)
# info finish
end = datetime.datetime.now()
logging.info('All done for %s in %s', self.name, end - start)
def handle_all(self, **options):
if options.get("no_rq", False):
set_sync_mode(options.get('noinput', False))
if self.process_disabled_projects:
project_query = Project.objects.all()
else:
project_query = Project.objects.enabled()
if self.projects:
project_query = project_query.filter(code__in=self.projects)
for project in project_query.iterator():
<CHANGES>
tp_query = project.translationproject_set \
<CHANGEE>
.order_by('language__code')
if self.languages:
tp_query = tp_query.filter(language__code__in=self.languages)
for tp in tp_query.iterator():
self.do_translation_project(tp, **options)
class BaseRunCommand(BaseCommand):
"""Base class to build new server runners."""
"""Based on code from `django-shoes"""
"""<https://bitbucket.org/mlzboy/django-shoes/>`_."""
hostport_option_list = (
make_option(
'--host',
<FILEE>
<SCANS>#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import datetime
import logging
from optparse import make_option
from django.core.management.base import BaseCommand, NoArgsCommand
from pootle.runner import set_sync_mode
from pootle_project.models import Project
from pootle_translationproject.models import TranslationProject
class PootleCommand(NoArgsCommand):
"""Base class for handling recursive pootle store management commands."""
shared_option_list = (
make_option(
'--project',
action='append',
dest='projects',
help='Project to refresh',
),
make_option(
'--language',
action='append',
dest='languages',
help='Language to refresh',
),
make_option(
"--noinput",
action="store_true",
default=False,
help=u"Never prompt for input",
),
make_option(
"--no-rq",
action="store_true",
default=False,
help=(u"Run all jobs in a single process, without "
"using rq workers"),
),
)
option_list = NoArgsCommand.option_list + shared_option_list
process_disabled_projects = False
def __init__(self, *args, **kwargs):
self.languages = []
self.projects = []
super(PootleCommand, self).__init__(*args, **kwargs)
def do_translation_project(self, tp, **options):
process_stores = True
if hasattr(self, "handle_translation_project"):
logging.info(u"Running %s over %s", self.name, tp)
try:
process_stores = self.handle_translation_project(tp, **options)
except Exception:
logging.exception(u"Failed to run %s over %s", self.name, tp)
return
if not process_stores:
return
if hasattr(self, "handle_all_stores"):
logging.info(u"Running %s over %s's files", self.name, tp)
try:
self.handle_all_stores(tp, **options)
except Exception:
logging.exception(u"Failed to run %s over %s's files",
self.name, tp)
return
elif hasattr(self, "handle_store"):
store_query = tp.stores.live()
for store in store_query.iterator():
logging.info(u"Running %s over %s",
self.name, store.pootle_path)
try:
self.handle_store(store, **options)
except Exception:
logging.exception(u"Failed to run %s over %s
<FILEB>
<CHANGES>
self.store, state_by_room[room_id], user_id, fallback_to_members=False
<CHANGEE>
<FILEE>
<FILEB>
if msgformat == "org.matrix.custom.html" and formatted_body:
messagevars["body_text_html"] = safe_markup(formatted_body)
elif body:
messagevars["body_text_html"] = safe_text(body)
return messagevars
def add_image_message_vars(self, messagevars, event):
messagevars["image_url"] = event.content["url"]
return messagevars
@defer.inlineCallbacks
def make_summary_text(self, notifs_by_room, state_by_room,
notif_events, user_id, reason):
if len(notifs_by_room) == 1:
# Only one room has new stuff
room_id = notifs_by_room.keys()[0]
# If the room has some kind of name, use it, but we don't
# want the generated-from-names one here otherwise we'll
# end up with, "new message from Bob in the Bob room"
room_name = yield calculate_room_name(
<CHANGES>
state_by_room[room_id], user_id, fallback_to_members=False
<CHANGEE>
)
my_member_event = state_by_room[room_id][("m.room.member", user_id)]
if my_member_event.content["membership"] == "invite":
inviter_member_event = state_by_room[room_id][
("m.room.member", my_member_event.sender)
]
inviter_name = name_from_member_event(inviter_member_event)
if room_name is None:
defer.returnValue(INVITE_FROM_PERSON % {
"person": inviter_name,
"app": self.app_name
})
<FILEE>
<SCANS>]
if raw_to == '':
raise RuntimeError("Invalid 'to' address")
rooms_in_order = deduped_ordered_list(
[pa['room_id'] for pa in push_actions]
)
notif_events = yield self.store.get_events(
[pa['event_id'] for pa in push_actions]
)
notifs_by_room = {}
for pa in push_actions:
notifs_by_room.setdefault(pa["room_id"], []).append(pa)
# collect the current state for all the rooms in which we have
# notifications
state_by_room = {}
try:
user_display_name = yield self.store.get_profile_displayname(
UserID.from_string(user_id).localpart
)
if user_display_name is None:
user_display_name = user_id
except StoreError:
user_display_name = user_id
@defer.inlineCallbacks
def _fetch_room_state(room_id):
room_state = yield self.state_handler.get_current_state_ids(room_id)
state_by_room[room_id] = room_state
# Run at most 3 of these at once: sync does 10 at a time but email
# notifs are much less realtime than sync so we can afford to wait a bit.
yield concurrently_execute(_fetch_room_state, rooms_in_order, 3)
# actually sort our so-called rooms_in_order list, most recent room first
rooms_in_order.sort(
key=lambda r: -(notifs_by_room[r][-1]['received_ts'] or 0)
)
rooms = []
for r in rooms_in_order:
roomvars = yield self.get_room_vars(
r, user_id, notifs_by_room[r], notif_events, state_by_room[r]
)
rooms.append(roomvars)
reason['room_name'] = yield calculate_room_name(
self.store, state_by_room[reason['room_id']], user_id,
fallback_to_members=True
)
summary_text = yield self.make_summary_text(
notifs_by_room, state_by_room, notif_events, user_id, reason
)
template_vars = {
"user_display_name": user_display_name,
"unsubscribe_link": self.make_unsubscribe_link(
user_id, app_id, email_address
),
"summary_text": summary_text,
<FILEB>
<CHANGES>
self.assertContains(response, b"Currently")
<CHANGEE>
<FILEE>
<FILEB>
self.client.logout()
def test_inline_file_upload_edit_validation_error_post(self):
"""Test that inline file uploads correctly display prior data (#10002)."""
post_data = {
"name": "Test Gallery",
"pictures-TOTAL_FORMS": "2",
"pictures-INITIAL_FORMS": "1",
"pictures-MAX_NUM_FORMS": "0",
"pictures-0-id": six.text_type(self.picture.id),
"pictures-0-gallery": six.text_type(self.gallery.id),
"pictures-0-name": "Test Picture",
"pictures-0-image": "",
"pictures-1-id": "",
"pictures-1-gallery": str(self.gallery.id),
"pictures-1-name": "Test Picture 2",
"pictures-1-image": "",
}
response = self.client.post('/test_admin/%s/admin_views/gallery/%d/' % (self.urlbit, self.gallery.id), post_data)
<CHANGES>
self.assertTrue(response._container[0].find("Currently:") > -1)
<CHANGEE>
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class AdminInlineTests(TestCase):
urls = "admin_views.urls"
fixtures = ['admin-views-users.xml']
def setUp(self):
self.post_data = {
"name": "Test Name",
"widget_set-TOTAL_FORMS": "3",
"widget_set-INITIAL_FORMS": "0",
"widget_set-MAX_NUM_FORMS": "0",
"widget_set-0-id": "",
"widget_set-0-owner": "1",
<FILEE>
<SCANS>.core.context_processors.i18n',
global_settings.TEMPLATE_CONTEXT_PROCESSORS),
USE_I18N=False,
)
def testLangNamePresent(self):
response = self.client.get('/test_admin/%s/admin_views/' % self.urlbit)
self.assertNotContains(response, ' lang=""')
self.assertNotContains(response, ' xml:lang=""')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
USE_THOUSAND_SEPARATOR=True, USE_L10N=True)
class DateHierarchyTests(TestCase):
urls = "admin_views.urls"
fixtures = ['admin-views-users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
formats.reset_format_cache()
def assert_non_localized_year(self, response, year):
"""Ensure that the year is not localized with"""
"""USE_THOUSAND_SEPARATOR. Refs #15234."""
self.assertNotContains(response, formats.number_format(year))
def assert_contains_year_link(self, response, date):
self.assertContains(response, '?release_date__year=%d"' % (date.year,))
def assert_contains_month_link(self, response, date):
self.assertContains(
response, '?release_date__month=%d&release_date__year=%d"' % (
date.month, date.year))
def assert_contains_day_link(self, response, date):
self.assertContains(
response, '?release_date__day=%d&'
'release_date__month=%d&release_date__year=%d"' % (
date.day, date.month, date.year))
def test_empty(self):
"""Ensure that no date hierarchy links display with empty changelist."""
response = self.client.get(
reverse('admin:admin_views_podcast_changelist'))
self.assertNotContains(response, 'release_date__year=')
self.assertNotContains(response, 'release_date__month=')
self.assertNotContains(response, 'release_date__day=')
def test_single(self):
"""Ensure that single day-level date hierarchy appears for single object."""
DATE = datetime.date(2000, 6, 30)
Podcast.objects.create(release_date=DATE)
url = reverse('admin:admin_views_podcast_changelist
<FILEB>
<CHANGES>
if currency.company_id.id!= company_id:
<CHANGEE>
<FILEE>
<FILEB>
}
}
if type in ('in_invoice', 'in_refund'):
result['value']['partner_bank'] = bank_id
if payment_term != partner_payment_term:
if partner_payment_term:
to_update = self.onchange_payment_term_date_invoice(
cr,uid,ids,partner_payment_term,date_invoice)
result['value'].update(to_update['value'])
else:
result['value']['date_due'] = False
if partner_bank_id != bank_id:
to_update = self.onchange_partner_bank(cr, uid, ids, bank_id)
result['value'].update(to_update['value'])
return result
def onchange_currency_id(self, cr, uid, ids, curr_id, company_id):
if curr_id:
currency = self.pool.get('res.currency').browse(cr, uid, curr_id)
<CHANGES>
if currency.company_id != company_id:
<CHANGEE>
raise osv.except_osv(_('Configuration Error !'),
_('Can not select currency that is not related to current company.\nPlease select accordingly !.'))
return {}
def onchange_payment_term_date_invoice(self, cr, uid, ids, payment_term_id, date_invoice):
if not payment_term_id:
return {}
res={}
pt_obj= self.pool.get('account.payment.term')
if not date_invoice :
date_invoice = time.strftime('%Y-%m-%d')
pterm_list = pt_obj.compute(cr, uid, payment_term_id, value=1, date_ref=date_invoice)
if pterm_list:
<FILEE>
<SCANS>('%Y-%m-%d')}, round=False)
val['account_id'] = tax['account_paid_id'] or line.account_id.id
key = (val['tax_code_id'], val['base_code_id'], val['account_id'])
if not key in tax_grouped:
tax_grouped[key] = val
else:
tax_grouped[key]['amount'] += val['amount']
tax_grouped[key]['base'] += val['base']
tax_grouped[key]['base_amount'] += val['base_amount']
tax_grouped[key]['tax_amount'] += val['tax_amount']
for t in tax_grouped.values():
t['amount'] = cur_obj.round(cr, uid, cur, t['amount'])
t['base_amount'] = cur_obj.round(cr, uid, cur, t['base_amount'])
t['tax_amount'] = cur_obj.round(cr, uid, cur, t['tax_amount'])
return tax_grouped
def move_line_get(self, cr, uid, invoice_id):
res = []
cr.execute('SELECT * FROM account_invoice_tax WHERE invoice_id=%s', (invoice_id,))
for t in cr.dictfetchall():
if not t['amount'] \
and not t['tax_code_id'] \
and not t['tax_amount']:
continue
res.append({
'type':'tax',
'name':t['name'],
'price_unit': t['amount'],
'quantity': 1,
'price': t['amount'] or 0.0,
'account_id': t['account_id'],
'tax_code_id': t['tax_code_id'],
'tax_amount': t['tax_amount']
})
return res
account_invoice_tax()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
<FILEB>
<CHANGES>
if (isinstance(v.dtype, type) and issubclass(v.dtype, basestring)) or v.dtype.char == 'S':
<CHANGEE>
<FILEE>
<FILEB>
"""(except for boundary variables defined in Section 7.1, "Cell Boundaries" and climatology variables"""
"""defined in Section 7.4, "Climatological Statistics")."""
"""Units are not required for dimensionless quantities. A variable with no units attribute is assumed"""
"""to be dimensionless. However, a units attribute specifying a dimensionless unit may optionally be"""
"""included."""
"""- units required"""
"""- type must be recognized by udunits"""
"""- if std name specified, must be consistent with standard name table, must also be consistent with a"""
"""specified cell_methods attribute if present"""
ret_val = []
deprecated = ['level', 'layer', 'sigma_level']
for k, v in ds.dataset.variables.iteritems():
# skip climatological vars, boundary vars
if v in self._find_clim_vars(ds) or \
v in self._find_boundary_vars(ds).itervalues() or \
v.shape == ():
continue
# skip string type vars
<CHANGES>
if v.dtype.char == 'S':
<CHANGEE>
continue
# skip quality control vars
if hasattr(v, 'flag_meanings'):
continue
if hasattr(v, 'standard_name') and 'status_flag' in v.standard_name:
continue
# skip DSG cf_role
if hasattr(v, "cf_role"):
continue
units = getattr(v, 'units', None)
# 1) "units" attribute must be present
presence = Result(BaseCheck.HIGH, units is not None, ('units', k, 'present'))
<FILEE>
<SCANS>platform_name', 'station_name', 'instrument_name')
in_table = std_name in self._std_names
if not is_str:
msgs.append("The standard name '%s' is not of type string. It is type %s" % (std_name, type(std_name)))
if not in_table and not in_exception:
msgs.append("The standard name '%s' is not in standard name table" % std_name)
ret_val.append(Result(BaseCheck.HIGH, is_str and in_table, ('std_name', k, 'legal'), msgs))
# 2) optional - if modifiers, should be in table
if std_name_modifier:
allowed = ['detection_minimum',
'number_of_observations',
'standard_error',
'status_flag']
msgs = []
if not std_name_modifier in allowed:
msgs.append("modifier (%s) not allowed" % std_name_modifier)
ret_val.append(Result(BaseCheck.HIGH, std_name_modifier in allowed, ('std_name', k, 'modifier'), msgs))
return ret_val
def check_ancillary_data(self, ds):
"""3.4 It is a string attribute whose value is a blank separated list of variable names."""
"""The nature of the relationship between variables associated via ancillary_variables must"""
"""be determined by other attributes. The variables listed by the ancillary_variables attribute"""
"""will often have the standard name of the variable which points to them including a modifier"""
"""(Appendix C, Standard Name Modifiers) to indicate the relationship."""
ret_val = []
for k, v in ds.dataset.variables.iteritems():
anc = getattr(v, 'ancillary_variables', None)
if anc is None:
continue
# should be a string, splittable, and each should exist
anc_result = Result(BaseCheck.HIGH, name=('ancillary', k))
msgs = []
if not isinstance(anc, basestring):
anc_result.value = False
anc_result.msgs = ["ancillary_variables is not a string"]
ret_val.append(anc_result)
continue
ancs = anc.split()
existing = 0
for a in ancs:
if a in ds.dataset.variables:
existing += 1
else:
msgs.append("ancillary var %s does not exist" % a)
anc_result.value = (existing, len(ancs))
anc_result.msgs = msgs
ret_val.append(anc_result)
return ret_val
def check_flags(self, ds):
"""3.5 The attributes flag_values, flag_masks and flag_meanings are intended to make variables"""
"""that contain flag values self describing. Status codes and Boolean (binary) condition flags may be"""
<FILEB>
<CHANGES>
self._columns['o2target'].readonly = not (user in user_ids)
<CHANGEE>
<FILEE>
<FILEB>
_table = "nh_clinical_wardboard"
_trend_strings = [('up', 'up'), ('down', 'down'), ('same', 'same'), ('none', 'none'), ('one', 'one')]
_rec_name = 'full_name'
def _get_logo(self, cr, uid, ids, fields_name, arg, context=None):
res = {}
for board in self.browse(cr, uid, ids, context=context):
res[board.id] = board.patient_id.partner_id.company_id.logo
return res
_clinical_risk_selection = [['NoScore', 'No Score Yet'],
['High', 'High Risk'],
['Medium', 'Medium Risk'],
['Low', 'Low Risk'],
['None', 'No Risk']]
_boolean_selection = [('yes', 'Yes'),
('no', 'No')]
def fields_view_get(self, cr, user, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
user_pool = self.pool['res.users']
user_ids = user_pool.search(cr, user, [['groups_id.name', 'in', ['NH Clinical Doctor Group']]], context=context)
<CHANGES>
self._columns['o2target'].readonly = not (user in user_ids)
<CHANGEE>
res = super(nh_clinical_wardboard, self).fields_view_get(cr, user, view_id, view_type, context, toolbar, submenu)
return res
def _get_started_device_session_ids(self, cr, uid, ids, field_name, arg, context=None):
res = {}.fromkeys(ids, False)
sql = """select spell_id, ids """
"""from wb_activity_data """
"""where data_model='nh.clinical.device.session' """
"""and state in ('started') and spell_id in (%s)""" % ", ".join([str(spell_id) for spell_id in ids])
cr.execute(sql)
res.update({r['spell_id']: r['ids'] for r in cr.dictfetchall()})<SCANS>(cr, uid, spell_id, context=context)
device_activity_id = self.pool['nh.clinical.device.session'].create_activity(cr, uid,
{'parent_id': spell.activity_id.id},
{'patient_id': wiz.patient_id.id,
'device_type_id': wiz.device_type_id.id,
'device_id': wiz.device_id.id if wiz.device_id else False})
self.pool['nh.activity'].start(cr, uid, device_activity_id, context)
self.pool['nh.activity'].submit(cr, uid, device_activity_id, {'location': wiz.location}, context)
class wardboard_device_session_complete(orm.TransientModel):
_name = "wardboard.device.session.complete"
_columns = {
'session_id': fields.many2one('nh.clinical.device.session', 'Session'),
'removal_reason': fields.char('Removal reason', size=100),
'planned': fields.selection((('planned', 'Planned'), ('unplanned', 'Unplanned')), 'Planned?')
}
def do_complete(self, cr, uid, ids, context=None):
activity_pool = self.pool['nh.activity']
wiz = self.browse(cr, uid, ids[0])
activity_pool.submit(cr, uid, wiz.session_id.activity_id.id, {'removal_reason': wiz.removal_reason, 'planned': wiz.planned}, context)
activity_pool.complete(cr, uid, wiz.session_id.activity_id.id, context)
# refreshing view
spell_activity_id = wiz.session_id.activity_id.parent_id.id
wardboard_pool = self.pool['nh.clinical.wardboard']
wardboard_id = wardboard_pool.search(cr, uid, [['spell_activity_id', '=', spell_activity_id]])[0]
view_id = self.pool['ir.model.data'].get_object_reference(cr,
<FILEB>
<CHANGES>
os.chdir(sys.prefix)
<CHANGEE>
<FILEE>
<FILEB>
from .._vendor.boltons.setutils import IndexedSet
from ..common.compat import NoneType, iteritems, itervalues, odict, on_win, string_types
from ..common.configuration import (Configuration, LoadError, MapParameter, PrimitiveParameter,
SequenceParameter, ValidationError)
from ..common.disk import conda_bld_ensure_dir
from ..common.path import expand
from ..common.platform import linux_get_libc_version
from ..common.url import has_scheme, path_to_url, split_scheme_auth_token
try:
from cytoolz.itertoolz import concat, concatv, unique
except ImportError: # pragma: no cover
from .._vendor.toolz.itertoolz import concat, concatv, unique
try:
os.getcwd()
except (IOError, OSError) as e:
if e.errno == ENOENT:
# FileNotFoundError can occur when cwd has been deleted out from underneath the process.
# To resolve #6584, let's go with setting cwd to $HOME, and see how far we get.
<CHANGES>
os.chdir(expand('~'))
<CHANGEE>
else:
raise
log = getLogger(__name__)
_platform_map = {
'linux2': 'linux',
'linux': 'linux',
'darwin': 'osx',
'win32': 'win',
'zos': 'zos',
}
non_x86_linux_machines = {
'armv6l',
<FILEE>
<SCANS> import NotWritableError
raise NotWritableError(context.envs_dirs[0], None)
# backward compatibility for conda-build
def get_prefix(ctx, args, search=True): # pragma: no cover
return determine_target_prefix(ctx or context, args)
try:
context = Context((), None)
except LoadError as e: # pragma: no cover
print(e, file=sys.stderr)
# Exception handler isn't loaded so use sys.exit
sys.exit(1)
<FILEB>
<CHANGES>
data_path in [os.path.join(temp_dir_, f) for f in os.listdir(temp_dir_)]:
<CHANGEE>
<FILEE>
<FILEB>
var = ncvar[0, 0, :, :]
else:
log.error(
"After processing, the shape of the mask variable is %s which cannot be applied to time slices" % str(
ncvar.shape))
return
f, v = masks[name]["operator"], masks[name]["rhs"]
func = numpy.vectorize(lambda x: f(x, v))
masks[name]["array"] = func(var[:, :])
finally:
dataset.close()
# Deletes all temporary paths and removes temp directory
def clean_tmp_data(tasks):
global temp_dir_, ifs_gridpoint_file_, ifs_spectral_file_
for task in tasks:
for key in [cmor_task.filter_output_key, cmor_task.output_path_key]:
data_path = getattr(task, key, None)
if data_path is not None and data_path not in [ifs_spectral_file_, ifs_gridpoint_file_] and \
<CHANGES>
data_path in os.listdir(temp_dir_):
<CHANGEE>
os.remove(data_path)
delattr(task, cmor_task.output_path_key)
if not any(os.listdir(temp_dir_)):
os.rmdir(temp_dir_)
temp_dir_ = os.getcwd()
else:
log.warning("Skipped removal of nonempty work directory %s" % temp_dir_)
# Creates a sub-list of tasks that we believe we can successfully process
def filter_tasks(tasks):
global log
log.info("Inspecting %d tasks." % len(tasks))
result = []
<FILEE>
<SCANS>files) > 1 or len(shfiles) > 1:
# TODO: Support postprocessing over multiple files
log.warning("Expected a single grid point and spectral file in %s, found %s and %s; \
will take first file of each list." % (path, str(gpfiles), str(shfiles)))
ifs_gridpoint_file_ = gpfiles[0] if len(gpfiles) > 0 else None
ifs_spectral_file_ = shfiles[0] if len(shfiles) > 0 else None
if any(inifiles):
ifs_init_gridpoint_file_ = inifiles[0]
if len(inifiles) > 1:
log.warning("Multiple initial gridpoint files found, will proceed with %s" % ifs_init_gridpoint_file_)
else:
ifs_init_gridpoint_file_ = ifs_gridpoint_file_
tmpdir_parent = os.getcwd() if tempdir is None else tempdir
dirname = exp_name_ + start_date_.strftime("-ifs-%Y%m")
temp_dir_ = os.path.join(tmpdir_parent, dirname)
if os.path.exists(temp_dir_):
if any(os.listdir(temp_dir_)):
log.warning("Requested temporary directory %s already exists and is nonempty..." % temp_dir_)
temp_dir_ = tempfile.mkdtemp(prefix=dirname + '-', dir=tmpdir_parent)
log.warning("generated new temporary directory %s" % temp_dir_)
else:
os.makedirs(temp_dir_)
max_size_ = maxsizegb
if autofilter:
grib_filter.initialize(ifs_gridpoint_file_, ifs_spectral_file_, temp_dir_)
return True
# Execute the postprocessing+cmorization tasks. First masks, then surface pressures, then regular tasks.
def execute(tasks, cleanup=True, autofilter=True, nthreads=1):
global log, start_date_, ifs_grid_descr_
supported_tasks = [t for t in filter_tasks(tasks) if t.status == cmor_task.status_initialized]
log.info("Executing %d IFS tasks..." % len(supported_tasks))
mask_tasks = get_mask_tasks(supported_tasks)
surf_pressure_tasks = get_sp_tasks(supported_tasks, autofilter)
regular_tasks = [t for t in supported_tasks if t not in surf_pressure_tasks]
tasks_todo = mask_tasks + surf_pressure_tasks + regular_tasks
grid_descr_file = None
if autofilter:
tasks_todo = grib_filter.execute(tasks_todo, start_date_.month)
for t in tasks
<FILEB>
<CHANGES>
import cdutil, genutil, sys, os, cdms2, MV2, time
<CHANGEE>
<FILEE>
<FILEB>
'''Created on Jul 30, 2013'''
'''@author: tpmaxwel'''
'''Created on Jul 25, 2013'''
'''@author: tpmaxwel'''
<CHANGES>
import cdutil, genutil, sys, os, cdms2, MV2
<CHANGEE>
def make_corners(lat,lon,fname=0):
"""This func computes corners by extrapolating cell center positions."""
"""INPUTS:"""
"""lat : 2d array of lat values (degrees)"""
"""lon : 2d array of lon values (degrees). size(lat) must = size(lon)"""
"""fname: name of file to write this stuff to. Since this function"""
"""runs fast, saving isn't so important. Set this to zero or"""
"""omit it and no file will be written."""
"""OUTPUTS:"""
"""lat_corners"""
"""lon_corners"""
"""NOTES: 4-3"""
<FILEE>
<SCANS>
if hasattr( Var, "coordinates" ):
axis_ids = Var.coordinates.strip().split(' ')
lat_d01 = file( axis_ids[1], squeeze=1 )
lon_d01 = file( axis_ids[0], squeeze=1 )
elif hasattr( Var, "stagger" ):
stagger = Var.stagger.strip()
lat_d01 = file( "XLAT_%s" % stagger, squeeze=1 )
lon_d01 = file( "XLONG_%s" % stagger, squeeze=1 )
else:
lat_d01 = file( "XLAT", squeeze=1 )
lon_d01 = file( "XLONG", squeeze=1 )
lat_corners, lon_corners, roi = make_corners( lat_d01, lon_d01 )
ni,nj = lat_d01.shape
iaxis = TransientVirtualAxis("i", ni)
jaxis = TransientVirtualAxis("j", nj)
lataxis = TransientAxis2D(lat_d01, axes=(iaxis, jaxis), bounds=lat_corners, attributes={'units':'degrees_east'}, id="latitude")
lonaxis = TransientAxis2D(lon_d01, axes=(iaxis, jaxis), bounds=lon_corners, attributes={'units':'degrees_north'}, id="longitude")
grid = TransientCurveGrid( lataxis, lonaxis, id='WRF_inner' )
if levaxis:
levaxis.designateLevel()
tVar = cdms2.createVariable( Var, axes=( levaxis, grid ), id=var.id, typecode=Var.typecode() )
else:
tVar = cdms2.createVariable( Var, axes=( grid, ), id=var.id, typecode=Var.typecode() )
a=tVar.getAxis(0)
a.name = 'Latitude'
b=tVar.getAxis(1)
b.name = 'Longitude'
dims = lat_d01.shape if ( lat_d01.MemoryOrder == 'XY' ) else [ lat_d01.shape[1], lat_d01.shape[0] ]
lon0 = roi[0]
dlon = ( roi[1] - roi[0] ) / dims[0]
lat0 = roi[2]
dlat = ( roi[3] - roi[2] ) / dims[1]
tg0 = time.clock()
lat_lon_grid = cdms2.createUniformGrid( lat0, dims[1], dlat, lon0, dims[0], dlon )
regrid_Var = tVar.regrid( lat_lon_grid, regridTool = 'libcf', regridMethod = 'linear' )
tg1 = time.clock()
print "Regrid required %.2f secs." % ( tg1-tg0 )
print "WRF data processing required %.2f secs." % ( tg1-tr1 )
return regrid_Var
<FILEB>
<CHANGES>
from Numeric import array, ravel, reshape, shape, alltrue, sometrue
<CHANGEE>
<FILEE>
<FILEB>
<CHANGES>
from Numeric import array, ravel, reshape, shape, alltrue
<CHANGEE>
from Numeric import Int8, UInt8, Int16, UInt16, Int32, UInt32, \
Float32, Float64, Complex32, Complex64, Float, Int, Complex
from matplotlib._isnan import isnan64 as _isnan
class _TypeNamespace:
"""Numeric compatible type aliases for use with extension functions."""
Int8 = Int8
UInt8 = UInt8
Int16 = Int16
UInt16 = UInt16
Int32 = Int32
UInt32 = UInt32
Float32 = Float32
<FILEE>
<SCANS> Float64 = Float64
Complex32 = Complex32
Complex64 = Complex64
nx = _TypeNamespace()
def isnan(a):
"""y = isnan(x) returns True where x is Not-A-Number"""
return reshape(array([_isnan(i) for i in ravel(a)],'b'), shape(a))
def all(a, axis=None):
'''Numpy-compatible version of all()'''
if axis is None:
return alltrue(ravel(a))
else:
return alltrue(a, axis)
def any(a, axis=None):
if axis is None:
return sometrue(ravel(a))
else:
return sometrue(a, axis)
# inf is useful for testing infinities in results of array divisions
# (which don't raise exceptions)
inf = infty = infinity = Infinity = (array([1])/0.0)[0]
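A minimal usage sketch for the helpers defined above; the array values are invented for illustration:
a = array([1.0, 2.0, 0.0])
every_positive = all(a > 0)  # 0: alltrue over the flattened comparison result
some_positive = any(a > 0)   # 1: sometrue sees at least one nonzero element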
<FILEB>
<CHANGES>
print "progress [%s] %s" % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), s)
<CHANGEE>
<FILEE>
<FILEB>
if sign == "-":
ret *= -1
return ret
def get_patchname(patch):
ret = []
if patch.attributes['inverted'] == 'True':
ret.append("UNDO: ")
ret.append(i.getElementsByTagName("name")[0].childNodes[0].data)
lines = i.getElementsByTagName("comment")
if lines:
ret.extend(["\n", lines[0].childNodes[0].data])
return "".join(ret).encode('utf-8')
def get_author(patch):
author = patch.attributes['author'].value
if not ">" in author:
author = "%s <%s>" % (author.split('@')[0], author)
return author.encode('utf-8')
def progress(s):
<CHANGES>
print "progress %s" % s
<CHANGEE>
sys.stdout.flush()
origin = os.path.abspath(sys.argv[1])
working = "%s.darcs" % origin
progress("getting list of patches")
sock = os.popen("darcs changes --xml --reverse --repo %s" % origin)
buf = sock.read()
# this is hackish. we need to escape some bad chars, otherwise the xml
# will not be valid
buf = buf.replace('\x1b', '^]')
sock.close()
try:
xmldoc = xml.dom.minidom.parseString(buf)
<FILEE>
<SCANS>#!/usr/bin/env python
import xml.dom.minidom
import xml.parsers.expat
import os
import sys
import gzip
import time
import shutil
sys = reload(sys)
sys.setdefaultencoding("utf-8")
def __get_zone():
now = time.localtime()
if time.daylight and now[-1]:
offset = time.altzone
else:
offset = time.timezone
hours, minutes = divmod(abs(offset), 3600)
if offset > 0:
sign = "-"
else:
sign = "+"
return sign, hours, minutes
def get_zone_str():
sign, hours, minutes = __get_zone()
return "%s%02d%02d" % (sign, hours, minutes // 60)
def get_zone_int():
sign, hours, minutes = __get_zone()
ret = hours*3600+minutes*60
except xml.parsers.expat.ExpatError:
import chardet
progress("encoding is not utf8, guessing charset")
encoding = chardet.detect(buf)['encoding']
progress("detected encoding is %s" % encoding)
xmldoc = xml.dom.minidom.parseString(unicode(buf, encoding).encode('utf-8'))
sys.stdout.flush()
# init the tmp darcs repo
os.mkdir(working)
cwd = os.getcwd()
os.chdir(working)
os.system("darcs init --old-fashioned-inventory")
patches = xmldoc.getElementsByTagName('patch')
# this may be huge and we need it many times
patchnum = len(patches)
count = 0
for i in patches:
# apply the patch
buf = ["\nNew patches:\n"]
sock = gzip.open("%s/_darcs/patches/%s" % (origin, i.attributes['hash'].value))
buf.append(sock.read())
sock.close()
sock = os.popen("darcs changes --context")
buf.append(sock.read())
sock.close()
sock = os.popen("darcs apply --allow-conflicts >/dev/null", "w")
sock.write("".join(buf))
sock.close()
message = get_patchname(i)
# export the commit
print "commit refs/heads/master"
print "mark :%s" % count
date = int(time.mktime(time.strptime(i.attributes['date'].value, "%Y%m%d%H%M%S"))) + get_zone_int()
print "committer %s %s %s" % (get_author(i), date, get_zone_str())
print "data %d\n%s" % (len(
<FILEB>
<CHANGES>
if self.is_mine(addr):
<CHANGEE>
<FILEE>
<FILEB>
except:
raise BaseException(upgrade_msg)
self.update_tx_history()
if self.seed_version != SEED_VERSION:
raise BaseException(upgrade_msg)
return True
def get_new_address(self):
n = 0
for addr in self.addresses[-self.gap_limit:]:
if not self.history.get(addr):
n = n + 1
if n < self.gap_limit:
new_address = self.create_new_address2(False)
self.history[new_address] = [] #get from server
return True, new_address
else:
return False, "The last %d addresses in your list have never been used. You should use them first, or increase the allowed gap size in your preferences. "%self.gap_limit
def get_addr_balance(self, addr):
<CHANGES>
if addr in self.addresses:
<CHANGEE>
h = self.history.get(addr)
else:
h = self.interface.retrieve_history(addr)
if not h: return 0,0
c = u = 0
for item in h:
v = item['value']
if item['height']:
c += v
else:
u += v
return c, u
<FILEE>
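A short illustration of the confirmed/unconfirmed split performed by get_addr_balance above, using a made-up history list:
sample_history = [{'value': 50, 'height': 120000}, {'value': -20, 'height': 120010}, {'value': 5, 'height': 0}]
c = sum(item['value'] for item in sample_history if item['height'])      # confirmed entries have a block height
u = sum(item['value'] for item in sample_history if not item['height'])  # height 0 means still unconfirmed
# c == 30, u == 5 -- the same (c, u) pair the method returns for this history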
<SCANS> wallet_path):
if wallet_path is not None:
self.path = wallet_path
else:
# backward compatibility: look for wallet file in the default data directory
if "HOME" in os.environ:
wallet_dir = os.path.join( os.environ["HOME"], '.electrum')
elif "LOCALAPPDATA" in os.environ:
wallet_dir = os.path.join( os.environ["LOCALAPPDATA"], 'Electrum' )
elif "APPDATA" in os.environ:
wallet_dir = os.path.join( os.environ["APPDATA"], 'Electrum' )
else:
raise BaseException("No home directory found in environment variables.")
if not os.path.exists( wallet_dir ): os.mkdir( wallet_dir )
self.path = os.path.join( wallet_dir, 'electrum.dat' )
def import_key(self, keypair, password):
address, key = keypair.split(':')
if not self.is_valid(address): return False
b = ASecretToSecret( key )
if not b: return False
secexp = int( b.encode('hex'), 16)
private_key = ecdsa.SigningKey.from_secret_exponent( secexp, curve=SECP256k1 )
# sanity check
public_key = private_key.get_verifying_key()
if not address == public_key_to_bc_address( '04'.decode('hex') + public_key.to_string() ): return False
self.imported_keys[address] = self.pw_encode( key, password )
return True
def new_seed(self, password):
seed = "%032x"%ecdsa.util.randrange( pow(2,128) )
self.init_mpk(seed)
# encrypt
self.seed = wallet.pw_encode( seed, password )
def init_mpk(self,seed):
# public key
curve = SECP256k1
secexp = self.stretch_key(seed)
master_private_key = ecdsa.SigningKey.from_secret_exponent( secexp, curve = SECP256k1 )
self.master_public_key = master_private_key.get_verifying_key().to_string()
def all_addresses(self):
return self.addresses + self.change_addresses + self.imported_keys.keys()
def is_mine(self, address):
return address in self.all_addresses()
def is_change(self, address):
return address in self.change_addresses
def is_valid(self,addr):
ADDRESS_RE = re.compile('[1-9A-HJ-NP-Za-km-z]{26,}\\Z')
if not ADDRESS_RE.match(addr): return False
try:
h = bc_address_to_hash_160(addr)
except:
return False
return addr == hash_160_to_bc_address(h)
def stretch_key(self,seed):
oldseed = seed
for i in range(100000
<FILEB>
<CHANGES>
return web.safestr(self._creator())
<CHANGEE>
<FILEE>
<FILEB>
value = (translations and translations.ugettext(string)) or string
if args:
value = value % args
elif kwargs:
value = value % kwargs
return value
def __getattr__(self, key):
from infogami.utils.i18n import strings
# for backward-compatability
return strings.get('', key)
class LazyGetText:
def __call__(self, string, *args, **kwargs):
"""Translate a given string lazily."""
return LazyObject(lambda: GetText()(string, *args, **kwargs))
class LazyObject:
def __init__(self, creator):
self._creator = creator
def __str__(self):
<CHANGES>
return str(self._creator())
<CHANGEE>
def __repr__(self):
return repr(self._creator())
def __add__(self, other):
return self._creator() + other
def __radd__(self, other):
return other + self._creator()
gettext = GetText()
lgettext = LazyGetText()
_ = gettext
<FILEE>
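A small sketch, assuming the LazyObject class above, of how evaluation is deferred until the value is actually rendered (the lambda is a stand-in, not from the source):
lazy = LazyObject(lambda: "translated text")  # nothing is evaluated yet
greeting = "msg: " + str(lazy)                # __str__ calls the creator only at this point
banner = lazy + "!"                           # __add__ likewise evaluates on demand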
<SCANS>
copyright_holder='Internet Archive'
)
METHODS = [
("**.py", "python"),
("**.html", "openlibrary.i18n:extract_templetor")
]
COMMENT_TAGS = ["NOTE:"]
for d in dirs:
extracted = extract_from_dir(d, METHODS, comment_tags=COMMENT_TAGS, strip_comment_tags=True)
for filename, lineno, message, comments in extracted:
catalog.add(message, None, [(filename, lineno)], auto_comments=comments)
path = os.path.join(root, 'messages.pot')
f = open(path, 'w')
write_po(f, catalog)
f.close()
print 'wrote template to', path
def compile_translations():
for locale in get_locales():
po_path = os.path.join(root, locale, 'messages.po')
mo_path = os.path.join(root, locale, 'messages.mo')
if os.path.exists(po_path):
_compile_translation(po_path, mo_path)
def update_translations():
pot_path = os.path.join(root, 'messages.pot')
template = read_po(open(pot_path))
for locale in get_locales():
po_path = os.path.join(root, locale, 'messages.po')
mo_path = os.path.join(root, locale, 'messages.mo')
if os.path.exists(po_path):
catalog = read_po(open(po_path))
catalog.update(template)
f = open(po_path, 'w')
write_po(f, catalog)
f.close()
print 'updated', po_path
compile_translations()
@web.memoize
def load_translations(lang):
po = os.path.join(root, lang, 'messages.po')
mo_path = os.path.join(root, lang, 'messages.mo')
if os.path.exists(mo_path):
return Translations(open(mo_path))
class GetText:
def __call__(self, string, *args, **kwargs):
"""Translate a given string to the language of the current locale."""
translations = load_translations(web.ctx.get('lang', 'en'))
<FILEB>
<CHANGES>
log.warning("Repositories not found in {}".format(repos))
<CHANGEE>
<FILEE>
<FILEB>
sorted_versions = sorted(
[LooseVersion(x) for x in byrepo_ret[pkgname]], reverse=True
)
byrepo_ret[pkgname] = [x.vstring for x in sorted_versions]
return byrepo_ret
def _get_configured_repos(root=None):
"""Get all the info about repositories from the configurations."""
repos = os.path.join(root, os.path.relpath(REPOS, os.path.sep)) if root else REPOS
repos_cfg = configparser.ConfigParser()
if os.path.exists(repos):
repos_cfg.read(
[
repos + "/" + fname
for fname in os.listdir(repos)
if fname.endswith(".repo")
]
)
else:
<CHANGES>
log.error("Repositories not found in {}".format(repos))
<CHANGEE>
return repos_cfg
def _get_repo_info(alias, repos_cfg=None, root=None):
"""Get one repo meta-data."""
try:
meta = dict((repos_cfg or _get_configured_repos(root=root)).items(alias))
meta["alias"] = alias
for key, val in six.iteritems(meta):
if val in ["0", "1"]:
meta[key] = int(meta[key]) == 1
elif val == "NONE":
meta[key] = None
return meta
<FILEE>
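A sketch of what one repository definition and the metadata derived from it might look like; the alias, path and values below are illustrative, not taken from the source:
# [repo-oss]                      <- one .repo file in the configured repos directory
# name=openSUSE-OSS
# enabled=1
# autorefresh=0
# baseurl=http://download.example/oss
#
# _get_repo_info('repo-oss') would then return roughly:
# {'alias': 'repo-oss', 'name': 'openSUSE-OSS', 'enabled': True, 'autorefresh': False,
#  'baseurl': 'http://download.example/oss'}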
<SCANS>["foo", {"bar": "1.2.3-4"}]'"""
"""salt '*' pkg.install pkgs='["foo", {"bar": "<1.2.3-4"}]'"""
"""sources"""
"""A list of RPM packages to install. Must be passed as a list of dicts,"""
"""with the keys being package names, and the values being the source URI"""
"""or local path to the package."""
"""CLI Example:"""
""".. code-block:: bash"""
"""salt '*' pkg.install sources='[{"foo": "salt://foo.rpm"},{"bar": "salt://bar.rpm"}]'"""
"""ignore_repo_failure"""
"""Zypper returns error code 106 if one of the repositories are not available for various reasons."""
"""In case to set strict check, this parameter needs to be set to True. Default: False."""
"""no_recommends"""
"""Do not install recommended packages, only required ones."""
"""root"""
"""operate on a different root directory."""
"""diff_attr:"""
"""If a list of package attributes is specified, returned value will"""
"""contain them, eg.::"""
"""{'<package>': {"""
"""'old': {"""
"""'version': '<old-version>',"""
"""'arch': '<old-arch>'},"""
"""'new': {"""
"""'version': '<new-version>',"""
"""'arch': '<new-arch>'}}}"""
"""Valid attributes are: ``epoch``, ``version``, ``release``, ``arch``,"""
"""``install_date``, ``install_date_time_t``."""
"""If ``all`` is specified, all valid attributes will be returned."""
""".. versionadded:: 2018.3.0"""
"""Returns a dict containing the new package names and versions::"""
"""{'<package>': {'old': '<old-version>',"""
"""'new': '<new-version>'}}"""
"""If an attribute list is specified in ``diff_attr``, the dict will also contain"""
"""any specified attribute, eg.::"""
"""{'<package>': {"""
"""'old': {"""
"""'version': '<old-version>',"""
"""'arch': '<old-arch>'},"""
"""'new': {"""
"""'version': '<new-version>',"""
"""'arch': '<new-arch>'}}}"""
if refresh:
refresh_db(root)
try:
pkg_params, pkg_type = __salt__["pkg_resource.parse_targets"](
name, pkgs, sources, **kwargs
)
except MinionError as exc:
raise CommandExecutionError(exc)
if pkg_params is None or len(pkg_params) == 0:
return {}
version_num = Wildcard(__zypper__(root=root))(name, version)
if version_num:
if pkgs is None and sources is None:
# Allow "version" to work for single package target
pkg_params = {name: version_num}
else:
log.warning(
'"version" parameter will be ignored for multiple ' "package targets
<FILEB>
<CHANGES>
ofpport._length = ofproto_v1_3.OFP_PORT_SIZE
<CHANGEE>
<FILEE>
<FILEB>
msg_len, xid, buf)
(msg.cookie, msg.priority, msg.reason,
msg.table_id, msg.duration_sec, msg.duration_nsec,
msg.idle_timeout, msg.hard_timeout, msg.packet_count,
msg.byte_count) = struct.unpack_from(
ofproto_v1_3.OFP_FLOW_REMOVED_PACK_STR0,
msg.buf, ofproto_v1_3.OFP_HEADER_SIZE)
offset = (ofproto_v1_3.OFP_FLOW_REMOVED_SIZE -
ofproto_v1_3.OFP_MATCH_SIZE)
msg.match = OFPMatch.parser(msg.buf, offset)
return msg
class OFPPort(ofproto_parser.namedtuple('OFPPort', (
'port_no', 'hw_addr', 'name', 'config', 'state', 'curr',
'advertised', 'supported', 'peer', 'curr_speed', 'max_speed'))):
@classmethod
def parser(cls, buf, offset):
port = struct.unpack_from(ofproto_v1_3.OFP_PORT_PACK_STR, buf, offset)
ofpport = cls(*port)
<CHANGES>
ofpport.length = ofproto_v1_3.OFP_PORT_SIZE
<CHANGEE>
return ofpport
@_register_parser
@_set_msg_type(ofproto_v1_3.OFPT_PORT_STATUS)
class OFPPortStatus(MsgBase):
def __init__(self, datapath, reason=None, desc=None):
super(OFPPortStatus, self).__init__(datapath)
self.reason = reason
self.desc = desc
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg = super(OFPPortStatus, cls).parser(datapath, version, msg_type,
msg_len, xid, buf)
<FILEE>
<SCANS>_SIZE
return stats
@_set_stats_type(ofproto_v1_3.OFPMP_DESC, OFPDescStats)
@_set_msg_type(ofproto_v1_3.OFPT_MULTIPART_REQUEST)
class OFPDescStatsRequest(OFPMultipartRequest):
def __init__(self, datapath, flags):
super(OFPDescStatsRequest, self).__init__(datapath, flags)
@OFPMultipartReply.register_stats_type(body_single_struct=True)
@_set_stats_type(ofproto_v1_3.OFPMP_DESC, OFPDescStats)
@_set_msg_type(ofproto_v1_3.OFPT_MULTIPART_REPLY)
class OFPDescStatsReply(OFPMultipartReply):
def __init__(self, datapath, **kwargs):
super(OFPDescStatsReply, self).__init__(datapath, **kwargs)
class OFPFlowStats(StringifyMixin):
def __init__(self, table_id=None, duration_sec=None, duration_nsec=None,
priority=None, idle_timeout=None, hard_timeout=None,
flags=None, cookie=None, packet_count=None,
byte_count=None, match=None, instructions=None):
super(OFPFlowStats, self).__init__()
self._length = 0
self.table_id = table_id
self.duration_sec = duration_sec
self.duration_nsec = duration_nsec
self.priority = priority
self.idle_timeout = idle_timeout
self.hard_timeout = hard_timeout
self.flags = flags
self.cookie = cookie
self.packet_count = packet_count
self.byte_count = byte_count
self.match = match
self.instructions = instructions
@classmethod
def parser(cls, buf, offset):
flow_stats = cls()
(flow_stats._length, flow_stats.table_id,
flow_stats.duration_sec, flow_stats.duration_nsec,
flow_stats.priority, flow_stats.idle_timeout,
flow_stats.hard_timeout, flow_stats.flags,
flow_stats.cookie, flow_stats.packet_count,
flow_stats.byte_count) = struct.unpack_from(
ofproto_v1_3.OFP
<FILEB>
<CHANGES>
index = self.data.index(finfo)
<CHANGEE>
<FILEE>
<FILEB>
editor.setFocus()
def new(self, filename, encoding, text):
"""Create new filename with *encoding* and *text*"""
finfo = self.create_new_editor(filename, encoding, text,
set_current=False, new=True)
finfo.editor.set_cursor_position('eof')
finfo.editor.insert_text(os.linesep)
return finfo
def load(self, filename, set_current=True):
"""Load filename, create an editor instance and return it"""
"""*Warning* This is loading file, creating editor but not executing"""
"""the source code analysis -- the analysis must be done by the editor"""
"""plugin (in case multiple editorstack instances are handled)"""
filename = osp.abspath(unicode(filename))
self.emit(SIGNAL('starting_long_process(QString)'),
_("Loading %s...") % filename)
text, enc = encoding.read(filename)
finfo = self.create_new_editor(filename, enc, text, set_current)
<CHANGES>
index = self.get_stack_index()
<CHANGEE>
self._refresh_outlineexplorer(index, update=True)
self.emit(SIGNAL('ending_long_process(QString)'), "")
if self.isVisible() and self.checkeolchars_enabled \
and sourcecode.has_mixed_eol_chars(text):
name = osp.basename(filename)
QMessageBox.warning(self, self.title,
_("<b>%s</b> contains mixed end-of-line "
"characters.<br>Spyder will fix this "
"automatically.") % name,
QMessageBox.Ok)
self.set_os_eol_chars(index)
self.is_analysis_done = False
<FILEE>
<SCANS>_stack()
if set_new_index:
self.set_stack_index(new_index)
if self.outlineexplorer is not None:
self.outlineexplorer.file_renamed(finfo.editor, finfo.filename)
return new_index
def set_stack_title(self, index, is_modified):
finfo = self.data[index]
fname = finfo.filename
is_readonly = finfo.editor.isReadOnly()
tab_text = self.get_tab_text(fname, is_modified, is_readonly)
tab_tip = self.get_tab_tip(fname, is_modified, is_readonly)
self.tabs.setTabText(index, tab_text)
self.tabs.setTabToolTip(index, tab_tip)
#------ Context menu
def __setup_menu(self):
"""Setup tab context menu before showing it"""
self.menu.clear()
if self.data:
actions = self.menu_actions
else:
actions = (self.new_action, self.open_action)
self.setFocus() # --> Editor.__get_focus_editortabwidget
add_actions(self.menu, list(actions)+self.__get_split_actions())
self.close_action.setEnabled(self.is_closable)
#------ Hor/Ver splitting
def __get_split_actions(self):
# New window
self.newwindow_action = create_action(self, _("New window"),
icon="newwindow.png", tip=_("Create a new editor window"),
triggered=lambda: self.emit(SIGNAL("create_new_window()")))
# Splitting
self.versplit_action = create_action(self, _("Split vertically"),
icon="versplit.png",
tip=_("Split vertically this editor window"),
triggered=lambda: self.emit(SIGNAL("split_vertically()")))
self.horsplit_action = create_action(self, _("Split horizontally"),
icon="horsplit.png",
tip=_("Split horizontally this editor window"),
triggered=lambda: self.emit(SIGNAL("split_horizontally()")))
self.close_action = create_action(self, _("Close this panel"),
icon="close_panel.png", triggered=self.close)
return [None, self.newwindow_action, None,
self.versplit_action, self.horsplit_action, self.close_action]
def reset_orientation(self):
self.horsplit_action.setEnabled(True)
self.versplit_action.setEnabled(True)
def set_orientation(self, orientation):
self.horsplit_action.setEnabled(orientation == Qt.
<FILEB>
<CHANGES>
logging.debug(attributes['bill_id'])
<CHANGEE>
<FILEE>
<FILEB>
elif bill_number[0] == '2':
bill_prefix = 'SB'
elif bill_number[0] == '3':
bill_prefix = 'HCR'
elif bill_number[0] == '4':
bill_prefix = 'SCR'
elif bill_number[0] == '5':
bill_prefix = 'HR'
elif bill_number[0] == '6':
bill_prefix = 'SR'
elif bill_number[0] == '7':
bill_prefix = 'HMR'
elif bill_number[0] == '8':
bill_prefix = 'SMR'
attributes['bill_id'] = bill_prefix + ' ' + bill_number
# Skip duplicates (bill is listed once for each version)
if attributes['bill_id'] in indexed_bills.keys():
continue
<CHANGES>
print attributes['bill_id']
<CHANGEE>
# Parse details page
attributes.update(
self.scrape_bill_details(assembly_url, bill_number))
# Create bill
bill = Bill(**attributes)
# Parse actions
actions = self.scrape_bill_actions(assembly_url, bill_number, year)
for action in actions:
bill.add_action(**action)
# Parse versions
versions = self.scrape_bill_versions(assembly_url, bill_number)
for version in versions:
<FILEE>
<SCANS>
label = soup.find(text=re.compile('Party:')).parent
if label.name == 'span':
attributes['party'] = \
label.parent.findNextSibling('td').contents[0]
else:
attributes['party'] = label.nextSibling
label = soup.find(text=re.compile('District:')).parent
if label.name == 'span':
attributes['district'] = \
label.parent.findNextSibling('td').contents[0]
else:
attributes['district'] = label.nextSibling
# Supplemental data
label = soup.find(text=re.compile('Address:'))
attributes['address'] = \
label.parent.parent.findNextSibling('td').contents[0]
label = soup.find(text=re.compile('Telephone:'))
try:
attributes['telephone'] = \
label.parent.parent.findNextSibling('td').contents[0]
except:
# Handle aberrant empty tag
attributes['telephone'] = u''
label = soup.find(text=re.compile('E-mail:'))
try:
email = label.parent.parent.findNextSibling('td').contents[0]
except:
email = u''
if hasattr(email, 'contents'):
attributes['email'] = email.contents[0]
else:
if email != 'None':
attributes['email'] = email
else:
attributes['email'] = u''
return attributes
def scrape_bills(self, chamber, year):
"""Scrape the ND bills considered in a given chamber during a given year."""
# Error checking
if year not in self.metadata['session_details']:
raise NoDataForYear(year)
# URL building
if chamber == 'upper':
url_chamber_name = 'senate'
norm_chamber_name = 'Senate'
else:
url_chamber_name = 'house'
norm_chamber_name = 'House'
assembly_url = '/assembly/%i-%s' % (
self.metadata['session_details'][str(year)]['number'],
year)
chamber_url = '/bill-text/%s-bill.html' % (url_chamber_name)
list_url = self.site_root + assembly_url + chamber_url
# Parsing
soup = self.parser.parse(self.urlopen(list_url))
if not soup:
raise ScrapeError('Failed to parse legislative list page.')
table = soup.find('table', summary=norm_chamber_name + ' Bills')
bill_links = table.findAll('a', href=re.compile('bill-actions'))
indexed_bills = {}
logging.info(
'Scraping %s bills for %s.' % (norm_chamber_name, year))
for link in bill_links:
# Populate base attributes
attributes = {
'session': year,
'chamber': chamber,
}
bill_number = link.contents[0]
if not re.match('
<FILEB>
<CHANGES>
from ..utils import hashString, joinScripts
<CHANGEE>
<FILEE>
<FILEB>
# Bob build tool
# Copyright (C) 2016 Jan Klötzke
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ..errors import BuildError
from ..tty import colorize
<CHANGES>
from ..utils import hashString
<CHANGEE>
import os, os.path
import schema
import subprocess
import xml.etree.ElementTree
class SvnScm:
SCHEMA = schema.Schema({
'scm' : 'svn',
'url' : str,
schema.Optional('dir') : str,
schema.Optional('if') : str,
schema.Optional('revision') : schema.Or(int, str)
})
<FILEE>
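A hypothetical spec dictionary that would satisfy the SCHEMA above (all values invented for illustration):
example_spec = {'scm': 'svn', 'url': 'https://svn.example.org/project/trunk', 'dir': 'src', 'revision': 1234}
# SvnScm.SCHEMA.validate(example_spec) accepts this; 'dir', 'if' and 'revision' are the optional keys.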
<SCANS> def __init__(self, spec):
self.__modules = [{
"recipe" : spec['recipe'],
"url" : spec["url"],
"dir" : spec.get("dir"),
"revision" : spec.get("revision")
}]
@staticmethod
def __moduleAsScript(m):
return """if [[ -d "{SUBDIR}/.svn" ]] ; then"""
"""if [[ "{URL}" != */tags/* ]] ; then"""
"""svn up {REVISION_ARG} "{SUBDIR}""""
"""fi"""
"""else"""
"""if ! svn co {REVISION_ARG} "{URL}" "{SUBDIR}" ; then"""
"""rm -rf "{SUBDIR}""""
"""exit 1"""
"""fi"""
"""fi""".format(
URL=m["url"],
SUBDIR=m["dir"] if m["dir"] else ".",
REVISION_ARG=(("-r " + str( m["revision"] ) ) if m["revision"] else '')
)
@staticmethod
def __moduleAsDigestScript(m):
return (m["url"] + ( ("@"+str(m["revision"])) if m["revision"] else "" ) + " > "
+ (m["dir"] if m["dir"] else "."))
def getProperties(self):
ret = [ m.copy() for m in self.__modules ]
for m in ret: m['scm'] = "svn"
return ret
def asScript(self):
return joinScripts([ SvnScm.__moduleAsScript(m) for m in self.__modules ])
def asDigestScript(self):
"""Return forward compatible stable string describing this/these svn module(s)."""
"""Each module has its own line where the module is represented as "url[@rev] > dir"."""
return "\n".join([ SvnScm.__moduleAsDigestScript(m) for m in self.__modules ])
def asJenkins(self, workPath, credentials, options):
scm = xml.etree.ElementTree.Element("scm", attrib={
"class" : "hudson.scm.SubversionSCM",
"plugin" : "subversion@2.4.5",
})
locations = xml.etree.ElementTree.SubElement(scm, "locations")
for m in self.__modules:
location = xml.etree.ElementTree.SubElement(locations,
"hudson.scm.SubversionSCM_-ModuleLocation")
url = m[ "url" ]
if m["revision"]:
url += ( "@" + m["revision"] )
xml.etree.ElementTree.SubElement(location, "remote").text = url
credentialsId = xml.etree.ElementTree.SubElement(location, "credentialsId")
if credentials: credentialsId.text = credentials
xml.etree.ElementTree.SubElement(location, "local").text = (
os.path.join(workPath, m["dir"]) if m["dir"] else workPath )
xml.etree.ElementTree.SubElement(location, "depthOption").text = "infinity"
xml.etree.ElementTree.SubElement(location, "ignoreExternalsOption").text = "true"
xml.etree.ElementTree.SubElement(scm, "excludedRegions")
xml.etree.ElementTree.SubElement(scm, "included
<FILEB>
<CHANGES>
if self.is_mine(addr):
<CHANGEE>
<FILEE>
<FILEB>
except:
raise BaseException(upgrade_msg)
self.update_tx_history()
if self.seed_version != SEED_VERSION:
raise BaseException(upgrade_msg)
return True
def get_new_address(self):
n = 0
for addr in self.addresses[-self.gap_limit:]:
if not self.history.get(addr):
n = n + 1
if n < self.gap_limit:
new_address = self.create_new_address2(False)
self.history[new_address] = [] #get from server
return True, new_address
else:
return False, "The last %d addresses in your list have never been used. You should use them first, or increase the allowed gap size in your preferences. "%self.gap_limit
def get_addr_balance(self, addr):
<CHANGES>
if addr in self.addresses:
<CHANGEE>
h = self.history.get(addr)
else:
h = self.interface.retrieve_history(addr)
if not h: return 0,0
c = u = 0
for item in h:
v = item['value']
if item['height']:
c += v
else:
u += v
return c, u
<FILEE>
<SCANS> EncodeAES(secret, s)
else:
return s
def pw_decode(self, s, password):
if password is not None:
secret = Hash(password)
d = DecodeAES(secret, s)
if s == self.seed:
try:
d.decode('hex')
except:
raise BaseException("Invalid password")
return d
else:
return s
def get_tx_history(self):
lines = self.tx_history.values()
lines = sorted(lines, key=operator.itemgetter("nTime"))
return lines
def update_tx_history(self):
self.tx_history= {}
for addr in self.all_addresses():
h = self.history.get(addr)
if h is None: continue
for tx in h:
tx_hash = tx['tx_hash']
line = self.tx_history.get(tx_hash)
if not line:
self.tx_history[tx_hash] = copy.copy(tx)
line = self.tx_history.get(tx_hash)
else:
line['value'] += tx['value']
if line['height'] == 0:
line['nTime'] = 1e12
self.update_tx_labels()
def update_tx_labels(self):
for tx in self.tx_history.values():
default_label = ''
if tx['value']<0:
for o_addr in tx['outputs']:
if not self.is_change(o_addr):
dest_label = self.labels.get(o_addr)
if dest_label:
default_label = 'to: ' + dest_label
else:
default_label = 'to: ' + o_addr
else:
for o_addr in tx['outputs']:
if self.is_mine(o_addr) and not self.is_change(o_addr):
dest_label = self.labels.get(o_addr)
if dest_label:
default_label = 'at: ' + dest_label
else:
default_label = 'at: ' + o_addr
tx['default_label'] = default_label
def mktx(self, to_address, amount, label, password, fee=None):
if not self.is_valid(to_address):
raise BaseException("Invalid address")
inputs, total, fee = wallet.choose_tx_inputs( amount, fee )
if not inputs:
raise BaseException("Not enough funds")
outputs = wallet.choose_tx_outputs( to_address, amount, fee, total )
s_inputs = wallet.sign_inputs( inputs, outputs, password )
tx = filter( raw_tx( s_inputs, outputs ) )
if to_address not in self.addressbook:
self.addressbook.append(to_address)
if label:
tx_hash = Hash(tx.decode('hex') )[::-1].encode('hex')
wallet.labels[tx_hash] = label
wallet.save()
return tx
def sendtx(self, tx):
tx_hash = Hash(tx.decode('hex') )[::-1].encode('hex')
out = self
<FILEB>
<CHANGES>
instance.uuid, exc_info=sys.exc_info())
<CHANGEE>
<FILEE>
<FILEB>
# not implemented at all, as basic filter could be implemented
# with VIF rules created by xapi plugin
try:
self.firewall_driver.setup_basic_filtering(
instance, network_info)
except NotImplementedError:
pass
self.firewall_driver.prepare_instance_filter(instance,
network_info)
# 5. Boot the Instance
self._spawn(instance, vm_ref)
# The VM has started, let's ensure the security groups are enforced
self.firewall_driver.apply_instance_filter(instance, network_info)
self._update_instance_progress(context, instance,
step=4,
total_steps=BUILD_TOTAL_STEPS)
except (self.XenAPI.Failure, OSError, IOError) as spawn_error:
LOG.exception(_("instance %s: Failed to spawn"),
<CHANGES>
instance.id, exc_info=sys.exc_info())
<CHANGEE>
LOG.debug(_('Instance %s failed to spawn - performing clean-up'),
instance.id)
self._handle_spawn_error(vdis, spawn_error)
raise spawn_error
def spawn_rescue(self, context, instance, image_meta, network_info):
"""Spawn a rescue instance."""
self.spawn(context, instance, image_meta, network_info)
def _create_vm(self, context, instance, vdis, network_info, image_meta):
"""Create VM instance."""
instance_name = instance.name
vm_ref = VMHelper.lookup(self._session, instance_name)
if vm_ref is not None:
<FILEE>
<SCANS> vm_ref, hard=False)
self._update_instance_progress(context, instance,
step=3,
total_steps=RESIZE_TOTAL_STEPS)
# 4. Transfer the COW VHD
self._migrate_vhd(instance, cow_uuid, dest, sr_path)
self._update_instance_progress(context, instance,
step=4,
total_steps=RESIZE_TOTAL_STEPS)
# TODO(mdietz): we could also consider renaming these to
# something sensible so we don't need to blindly pass
# around dictionaries
vdis = {'base_copy': base_copy_uuid, 'cow': cow_uuid}
# NOTE(sirp): in case we're resizing to the same host (for dev
# purposes), apply a suffix to name-label so the two VM records
# extant until a confirm_resize don't collide.
name_label = self._get_orig_vm_name_label(instance)
VMHelper.set_vm_name_label(self._session, vm_ref, name_label)
finally:
if template_vm_ref:
self._destroy(instance, template_vm_ref,
shutdown=False, destroy_kernel_ramdisk=False)
return vdis
def _move_disks(self, instance, disk_info):
"""Move and possibly link VHDs via the XAPI plugin."""
base_copy_uuid = disk_info['base_copy']
new_base_copy_uuid = str(uuid.uuid4())
params = {'instance_uuid': instance['uuid'],
'sr_path': VMHelper.get_sr_path(self._session),
'old_base_copy_uuid': base_copy_uuid,
'new_base_copy_uuid': new_base_copy_uuid}
if 'cow' in disk_info:
cow_uuid = disk_info['cow']
new_cow_uuid = str(uuid.uuid4())
params['old_cow_uuid'] = cow_uuid
params['new_cow_uuid'] = new_cow_uuid
new_uuid = new_cow_uuid
else:
new_uuid = new_base_copy_uuid
task = self._session.async_call_plugin('migration',
'move_vhds_into_sr', {'params': pickle.dumps(params)})
self._session.wait_for_task(task, instance['uuid'])
# Now we rescan the SR so we find the VHDs
VMHelper.scan_default_sr(self._session)
# Set name-label so we can find if we need to clean up a failed
# migration
VMHelper.set_vdi_name_label(self._session, new_uuid,
instance.name)
return new_uuid
def _resize_instance(self, instance, vdi_uuid
<FILEB>
<CHANGES>
return [pygpu.get_include(), np.get_include()] + other_dirs
<CHANGEE>
<FILEE>
<FILEB>
"""}""" % {'name': name}
def c_init_code(self):
# We don't actually need the numpy API except in
# HostFromGpu and GpuFromHost and those case will be covered
# by the TensorType parameter
return ['import_pygpu__gpuarray();']
def c_headers(self):
# We need arrayobject for the PyArrayDescr struct def
# (even if we just use a pointer to it in a function def)
return ['<gpuarray/array.h>', '<gpuarray/kernel.h>',
'<gpuarray/error.h>', '<gpuarray/buffer.h>',
'<gpuarray/buffer_blas.h>', '<numpy/arrayobject.h>',
'<gpuarray_api.h>']
def c_header_dirs(self):
other_dirs = []
alt_inc_dir = os.path.abspath(os.path.normpath(sys.exec_prefix + '/Library/include'))
if os.path.exists(alt_inc_dir) and os.path.isdir(alt_inc_dir):
other_dirs.append(alt_inc_dir)
<CHANGES>
return [pygpu.get_include(), numpy.get_include()] + other_dirs
<CHANGEE>
def c_lib_dirs(self):
alt_lib_dir = os.path.abspath(os.path.normpath(sys.exec_prefix + '/Library/lib'))
if os.path.exists(alt_lib_dir) and os.path.isdir(alt_lib_dir):
return [alt_lib_dir]
return []
def c_libraries(self):
return ['gpuarray']
def c_code_cache_version(self):
ver = pygpu.gpuarray.abi_version()
# we only use the major version since the minor revision are compatible.
return (2, ver[0])
class _operators(_tensor_py_operators):
<FILEE>
<SCANS>_internal_type:
if borrow:
return self.container.value
else:
return self.container.value.copy()
else:
return np.asarray(self.container.value)
def set_value(self, value, borrow=False):
if isinstance(value, pygpu.gpuarray.GpuArray):
value = pygpu.gpuarray.array(value, copy=(not borrow),
context=self.type.context)
self.container.value = value
def __getitem__(self, *args):
return _operators.__getitem__(self, *args)
GpuArrayType.SharedVariable = GpuArraySharedVariable
notset = object()
def gpuarray_shared_constructor(value, name=None, strict=False,
allow_downcast=None, borrow=False,
broadcastable=None, target=notset):
"""SharedVariable constructor for GpuArrayType."""
"""See :func:`theano.shared`."""
""":target: default None"""
"""The device target. As None is a valid value and we need to"""
"""differentiate from the parameter notset and None, we use a"""
"""notset object."""
if target == 'gpu' or target == 'cpu':
raise TypeError('not for me')
if not isinstance(value, (np.ndarray, pygpu.gpuarray.GpuArray)):
raise TypeError('ndarray or GpuArray required')
if target is notset:
target = None
if not move_to_gpu(value):
raise TypeError('We do not move that data by default to the GPU')
try:
get_context(target)
except ContextNotDefined:
# Don't make this a hard error if we attempt to make a shared
# variable while there is no default context.
if target is None:
raise TypeError('No default context and no context specified')
raise
if broadcastable is None:
broadcastable = (False,) * value.ndim
type = GpuArrayType(value.dtype, broadcastable, context_name=target)
deviceval = pygpu.gpuarray.array(value, copy=(not borrow),
context=type.context)
return GpuArraySharedVariable(type=type, value=deviceval, name=name,
strict=strict)
theano.compile.register_view_op_c_code(GpuArrayType, """Py_XDECREF(%(oname)s);"""
"""%(oname)s = %(iname)s;"""
"""Py_XINCREF(%(oname)s);""", version=(0,))
# Register GpuArrayType C code for Shape Op.
theano.compile.
<FILEB>
<CHANGES>
if fnmatch.fnmatch(util.unistr(e), patternname):
<CHANGEE>
<FILEE>
<FILEB>
self.maskreg = regexp
self.reload()
def glob(self, pattern):
self.list_title = 'Grob:(%s)' % pattern
self.list = list(glob.iglob(pattern))
self.reload()
def globdir(self, pattern):
self.list_title = 'Grobdir:(%s)' % pattern
def _globdir(dirname, patternname):
try:
li = os.listdir(dirname)
except OSError:
return
for e in li:
entrypath = os.path.join(dirname, e)
if os.path.isdir(entrypath):
for ep in _globdir(entrypath, patternname):
yield ep
<CHANGES>
if fnmatch.fnmatch(e, patternname):
<CHANGEE>
yield os.path.normpath(entrypath)
self.list = list(_globdir(os.curdir, pattern))
self.reload()
def open_listfile(self, path):
pass
def reset(self):
if self.ismark():
self.mark_clear()
elif self.list is not None:
self.list = None
self.list_title = None
self.reload()
<FILEE>
<SCANS>
if util.termwidth(path) > width:
for name in util.unistr(path).split(os.sep)[:-1]:
if name:
path = path.replace(name, name[0])
if util.termwidth(path) <= width:
break
num = '[%d] ' % (i+1)
string = num + util.mbs_rjust(path, width-len(num))
if i == self.workspace.cursor:
self.titlebar.addstr(string, curses.A_REVERSE)
else:
self.titlebar.addstr(string)
self.titlebar.noutrefresh()
def toggle_view_ext(self):
FileStat.view_ext = not FileStat.view_ext
self.workspace.all_reload()
def toggle_view_permission(self):
FileStat.view_permission = not FileStat.view_permission
self.workspace.all_reload()
def toggle_view_nlink(self):
FileStat.view_nlink = not FileStat.view_nlink
self.workspace.all_reload()
def toggle_view_user(self):
FileStat.view_user = not FileStat.view_user
self.workspace.all_reload()
def toggle_view_group(self):
FileStat.view_group = not FileStat.view_group
self.workspace.all_reload()
def toggle_view_size(self):
FileStat.view_size = not FileStat.view_size
self.workspace.all_reload()
def toggle_view_mtime(self):
FileStat.view_mtime = not FileStat.view_mtime
self.workspace.all_reload()
def default_init(self):
for i in range(0, 5):
self.workspaces.append(Workspace(str(i+1)))
self.workspaces[-1].dirs.append(Directory(os.environ['HOME'], 10, 10, 1, 0))
self.workspaces[-1].dirs.append(Directory(os.environ['HOME'], 10, 10, 1, 0))
def savefile(self, path):
path = os.path.expanduser(path)
try:
f = open(path, 'w')
except IOError:
os.makedirs(util.unix_dirname(path))
f = open(path, 'w')
f.write('[workspace size]'+os.linesep)
f.write(str(len(self.workspaces))+os.linesep)
for ws in self.workspaces:
f.write('[workspace title]'+os.linesep)
f.write(ws.title+os.linesep)
f.write('[workspace size]'+os.linesep)
f.write(str(len(ws.dirs))+os.linesep)
for d in ws.dirs:
f.write('[workspace path]'+os.linesep)
f.write(d.path+os.linesep)
f.write('[sort kind]'+os.linesep)
f.write(d.sort_kind+os.linesep)
f.close()
def loadfile(self, path):
try:
f = open(os.path.expand
<FILEB>
<CHANGES>
viewscope = env.global_scope().context.cython_scope.viewscope
<CHANGEE>
<FILEE>
<FILEB>
elif packing == 'follow':
if has_strided:
raise CompileError(pos, "A memoryview cannot have both follow and strided axis specifiers.")
if not (is_c_contig or is_f_contig):
raise CompileError(pos, "Invalid use of the follow specifier.")
def _get_resolved_spec(env, spec):
# spec must be a NameNode or an AttributeNode
if isinstance(spec, NameNode):
return _resolve_NameNode(env, spec)
elif isinstance(spec, AttributeNode):
return _resolve_AttributeNode(env, spec)
else:
raise CompileError(spec.pos, INVALID_ERR)
def _resolve_NameNode(env, node):
try:
resolved_name = env.lookup(node.name).name
except AttributeError:
raise CompileError(node.pos, INVALID_ERR)
<CHANGES>
viewscope = env.context.cython_scope.viewscope
<CHANGEE>
return viewscope.lookup(resolved_name)
def _resolve_AttributeNode(env, node):
path = []
while isinstance(node, AttributeNode):
path.insert(0, node.attribute)
node = node.obj
if isinstance(node, NameNode):
path.insert(0, node.name)
else:
raise CompileError(node.pos, EXPR_ERR)
modnames = path[:-1]
# must be at least 1 module name, o/w not an AttributeNode.
<FILEE>
<SCANS> part of the loop.
dtype_decl = from_mvs.dtype.declaration_code("")
last_idx = ndim-1
code += INDENT*ndim+"memcpy(to_buf, from_buf+idx%(last_idx)d, sizeof(%(dtype_decl)s));\n" % locals()
code += INDENT*ndim+"to_buf += sizeof(%(dtype_decl)s);\n" % locals()
# for-loop closing braces
for k in range(ndim-1, -1, -1):
code += INDENT*(k+1)+"}\n"
# init to_mvs->data and to_mvs->diminfo.
code += INDENT+"temp_memview = to_mvs->memview;\n"
code += INDENT+"temp_data = to_mvs->data;\n"
code += INDENT+"to_mvs->memview = 0; to_mvs->data = 0;\n"
code += INDENT+"if(unlikely(-1 == __Pyx_init_memviewslice(temp_memview, %d, to_mvs))) {\n" % (ndim,)
code += INDENT*2+"return -1;\n"
code += INDENT+"}\n"
code += INDENT + "return 0;\n"
code += '}\n'
return code
def get_axes_specs(env, axes):
'''get_axes_specs(env, axes) -> list of (access, packing) specs for each axis.'''
'''access is one of 'full', 'ptr' or 'direct''''
'''packing is one of 'contig', 'strided' or 'follow''''
cythonscope = env.global_scope().context.cython_scope
viewscope = cythonscope.viewscope
access_specs = tuple([viewscope.lookup(name)
for name in ('full', 'direct', 'ptr')])
packing_specs = tuple([viewscope.lookup(name)
for name in ('contig', 'strided', 'follow')])
is_f_contig, is_c_contig = False, False
default_access, default_packing = 'direct', 'strided'
cf_access, cf_packing = default_access, 'follow'
# set the is_{c,f}_contig flag.
for idx, axis in ((0,axes[0]), (-1,axes[-1])):
if isinstance(axis.step, IntNode):
if axis.step.compile_time_value(env) != 1:
raise CompileError(axis.step.pos, STEP_ERR)
if len(axes) > 1 and (is_c_contig or is_f_contig):
raise CompileError(axis.step.pos, BOTH_CF_ERR)
if not idx:
is_f_contig = True
else:
is_c_contig = True
if len(axes) == 1:
break
assert not (is_c_contig and is_f_contig)
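An illustrative mapping, not taken from the source, of typed memoryview declarations to the (access, packing) specs that get_axes_specs computes:
# double[:, ::1]  -> [('direct', 'follow'), ('direct', 'contig')]     (C-contiguous)
# double[::1, :]  -> [('direct', 'contig'), ('direct', 'follow')]     (Fortran-contiguous)
# double[:, :]    -> [('direct', 'strided'), ('direct', 'strided')]   (default)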
<FILEB>
<CHANGES>
return [node for _, node in self._make_nodes()]
<CHANGEE>
<FILEE>
<FILEB>
if not isdefined(values):
values = []
if node.result.outputs:
values.insert(i, node.result.outputs.get()[key])
else:
values.insert(i, None)
if any([val != Undefined for val in values]) and self._result.outputs:
setattr(self._result.outputs, key, values)
if returncode and any([code is not None for code in returncode]):
msg = []
for i, code in enumerate(returncode):
if code is not None:
msg += ['Subnode %d failed'%i]
msg += ['Error:', str(code)]
raise Exception('Subnodes of node: %s failed:\n%s'%(self.name,
'\n'.join(msg)))
def get_subnodes(self):
self._get_inputs()
<CHANGES>
return [node for node in self._make_nodes()]
<CHANGEE>
def _run_interface(self, execute=True, updatehash=False):
"""Run the mapnode interface"""
"""This is primarily intended for serial execution of mapnode. A parallel"""
"""execution requires creation of new nodes that can be spawned"""
old_cwd = os.getcwd()
cwd = self.output_dir()
os.chdir(cwd)
if execute:
nitems = len(filename_to_list(getattr(self.inputs, self.iterfield[0])))
nodenames = ['_' + self.name+str(i) for i in range(nitems)]
# map-reduce formulation
self._collate_results(self._node_runner(self._make_nodes(cwd),
<FILEE>
<SCANS>3-tuples of the following form::"""
"""[(source, target,"""
"""[('sourceoutput/attribute', 'targetinput'),"""
"""...]),"""
"""...]"""
"""Or::"""
"""[(source, target, [(('sourceoutput1', func, arg2, ...),"""
"""'targetinput'), ...]),"""
"""...]"""
"""sourceoutput1 will always be the first argument to func"""
"""and func will be evaluated and the results sent ot targetinput"""
"""currently func needs to define all its needed imports within the"""
"""function as we use the inspect module to get at the source code"""
"""and execute it remotely"""
if len(args)==1:
connection_list = args[0]
elif len(args)==4:
connection_list = [(args[0], args[2], [(args[1], args[3])])]
else:
raise Exception('unknown set of parameters to connect function')
if not kwargs:
disconnect = False
else:
disconnect = kwargs['disconnect']
not_found = []
newnodes = []
for srcnode, destnode, _ in connection_list:
if (srcnode not in newnodes) and (srcnode not in self._graph.nodes()):
newnodes.append(srcnode)
if (destnode not in newnodes) and (destnode not in self._graph.nodes()):
newnodes.append(destnode)
if newnodes:
self._check_nodes(newnodes)
for node in newnodes:
if node._hierarchy is None:
node._hierarchy = self.name
for srcnode, destnode, connects in connection_list:
connected_ports = []
# check to see which ports of destnode are already
# connected.
if not disconnect and (destnode in self._graph.nodes()):
for edge in self._graph.in_edges_iter(destnode):
data = self._graph.get_edge_data(*edge)
for sourceinfo, destname in data['connect']:
connected_ports += [destname]
for source, dest in connects:
# Currently datasource/sink/grabber.io modules
# determine their inputs/outputs depending on
# connection settings. Skip these modules in the check
if dest in connected_ports:
raise Exception('Input %s of node %s is already ' \
'connected'%(dest,destnode))
if not (hasattr(destnode, '_interface') and '.io' in str(destnode._interface.__class__)):
if not destnode._check_inputs(dest):
not_found.append(['in', destnode.name, dest])
if not (hasattr(srcnode, '_interface') and '.io' in str(srcnode._interface.__class__)):
if isinstance(source, tuple):
# handles the case that source is specified
# with a function
sourcename = source[0]
elif isinstance
<FILEB>
<CHANGES>
return ("jess" if index >= int(value.split(",")[1] if "," in value else 4) else "unicable") if value.startswith("dSCR") else value
<CHANGEE>
<FILEE>
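The one-line getformat in the CHANGES above is dense; this is a readable restatement under the same assumed semantics: a value starting with "dSCR" may carry a threshold after a comma ("dSCR,8", default 4), and ports at or above that threshold are treated as JESS while lower ports stay unicable. getformat_expanded is a hypothetical name for illustration only.

def getformat_expanded(value, index, default_threshold=4):
    # Any value not starting with "dSCR" is returned unchanged.
    if not value.startswith("dSCR"):
        return value
    threshold = int(value.split(",")[1]) if "," in value else default_threshold
    return "jess" if index >= threshold else "unicable"

assert getformat_expanded("dSCR", 3) == "unicable"
assert getformat_expanded("dSCR", 4) == "jess"
assert getformat_expanded("dSCR,8", 7) == "unicable"
assert getformat_expanded("unicable", 0) == "unicable"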
<FILEB>
advanced_lnb_diseqcmode_choices = [("none", _("None")), ("1_0", _("1.0")), ("1_1", _("1.1")), ("1_2", _("1.2"))]
advanced_lnb_commandOrder1_0_choices = [("ct", "DiSEqC 1.0, toneburst"), ("tc", "toneburst, DiSEqC 1.0")]
advanced_lnb_commandOrder_choices = [
("ct", "DiSEqC 1.0, toneburst"), ("tc", "toneburst, DiSEqC 1.0"),
("cut", "DiSEqC 1.0, DiSEqC 1.1, toneburst"), ("tcu", "toneburst, DiSEqC 1.0, DiSEqC 1.1"),
("uct", "DiSEqC 1.1, DiSEqC 1.0, toneburst"), ("tuc", "toneburst, DiSEqC 1.1, DiSEqC 1.0")]
advanced_lnb_diseqc_repeat_choices = [("none", _("None")), ("one", _("One")), ("two", _("Two")), ("three", _("Three"))]
advanced_lnb_fast_turning_btime = mktime(datetime(1970, 1, 1, 7, 0).timetuple())
advanced_lnb_fast_turning_etime = mktime(datetime(1970, 1, 1, 19, 0).timetuple())
def configLOFChanged(configElement):
if configElement.value == "unicable":
x = configElement.slot_id
lnb = configElement.lnb_id
nim = config.Nims[x]
lnbs = nim.advanced.lnb
section = lnbs[lnb]
if isinstance(section.unicable, ConfigNothing):
def getformat(value, index):
<CHANGES>
return index >= 4 and "jess" or "unicable" if value == "dSRC" else value
<CHANGEE>
def positions<SCANS> in self.satList:
list.append(x)
elif nim.diseqcMode.value == "positioner_select":
userSatlist = nim.userSatellitesList.value
userSatlist = userSatlist.replace("]", "").replace("[", "")
for x in self.satList:
sat_str = str(x[0])
if userSatlist and ("," not in userSatlist and sat_str == userSatlist) or ((', ' + sat_str + ',' in userSatlist) or (userSatlist.startswith(sat_str + ',')) or (userSatlist.endswith(', ' + sat_str))):
list.append(x)
elif configMode == "advanced":
for x in range(3601, 3605):
if int(nim.advanced.sat[x].lnb.value) != 0:
for x in self.satList:
list.append(x)
if not list:
for x in self.satList:
lnbnum = int(nim.advanced.sat[x[0]].lnb.value)
if lnbnum != 0:
lnb = nim.advanced.lnb[lnbnum]
if lnb.diseqcMode.value == "1_2":
list.append(x)
for x in range(3605, 3607):
if int(nim.advanced.sat[x].lnb.value) != 0:
userSatlist = nim.advanced.sat[x].userSatellitesList.value
userSatlist = userSatlist.replace("]", "").replace("[", "")
for user_sat in self.satList:
sat_str = str(user_sat[0])
if userSatlist and ("," not in userSatlist and sat_str == userSatlist) or ((', ' + sat_str + ',' in userSatlist) or (userSatlist.startswith(sat_str + ',')) or (userSatlist.endswith(', ' + sat_str))) and user_sat not in list:
list.append(
<FILEB>
<CHANGES>
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), 'lib'))
<CHANGEE>
<FILEE>
<FILEB>
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import sys
# we only need this for compiling an EXE and I will just always do that on 2.6+
if sys.hexversion >= 0x020600F0:
from multiprocessing import Process, freeze_support
import os
import os.path
import threading
import time
import signal
import sqlite3
import traceback
import getopt
# allow libraries to import each other
<CHANGES>
sys.path.append(os.path.join(os.path.dirname(__file__), 'lib'))
<CHANGEE>
import sickbeard
from sickbeard import db
from sickbeard.tv import TVShow
from sickbeard import logger
from sickbeard.common import *
from sickbeard.version import SICKBEARD_VERSION
from sickbeard.webserveInit import initWebServer
from lib.configobj import ConfigObj
signal.signal(signal.SIGINT, sickbeard.sig_handler)
signal.signal(signal.SIGTERM, sickbeard.sig_handler)
def loadShowsFromDB():
myDB = db.DBConnection()
<FILEE>
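A minimal sketch of the difference between the two sys.path variants in the record above: sys.argv[0] reflects how the script was invoked (and can be relative or empty), while __file__ always points at the module file itself, so the lib/ directory resolves correctly regardless of the current working directory.

import os
import sys

lib_from_argv = os.path.join(os.path.dirname(sys.argv[0]), 'lib')
lib_from_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'lib')

print(lib_from_argv)   # depends on how the script was started
print(lib_from_file)   # always anchored to this file's location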
<SCANS>#!/usr/bin/python
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
sqlResults = myDB.select("SELECT * FROM tv_shows")
for sqlShow in sqlResults:
try:
curShow = TVShow(int(sqlShow["tvdb_id"]))
sickbeard.showList.append(curShow)
except Exception, e:
logger.log(u"There was an error creating the show in "+sqlShow["location"]+": "+str(e).decode('utf-8'), logger.ERROR)
logger.log(traceback.format_exc(), logger.DEBUG)
#TODO: make it update the existing shows if the showlist has something in it
def daemonize():
# Make a non-session-leader child process
try:
pid = os.fork()
if pid != 0:
sys.exit(0)
except OSError, e:
raise RuntimeError("1st fork failed: %s [%d]" %
(e.strerror, e.errno))
os.chdir(sickbeard.PROG_DIR)
os.setsid()
# Make sure I can read my own files and shut out others
prev = os.umask(0)
os.umask(prev and int('077',8))
# Make the child a session-leader by detaching from the terminal
try:
pid = os.fork()
if pid != 0:
sys.exit(0)
except OSError, e:
raise RuntimeError("2st fork failed: %s [%d]" %
(e.strerror, e.errno))
raise Exception, "%s [%d]" % (e.strerror, e.errno)
dev_null = file('/dev/null', 'r')
os.dup2(dev_null.fileno(), sys.stdin.fileno())
def main():
# do some preliminary stuff
sickbeard.MY_FULLNAME = os.path.normpath(os.path.abspath(__file__))
sickbeard.MY_NAME = os.path.basename(sickbeard.MY_FULLNAME)
sickbeard.PROG_DIR = os.path.dirname(sickbeard.MY_FULLNAME)
sickbeard.MY_ARGS = sys.argv[1:]
sickbeard.
<FILEB>
<CHANGES>
rars = (x for x in files if os.path.isfile(x) and rarfile.is_rarfile(x))
<CHANGEE>
<FILEE>
<FILEB>
if isinstance(new_pattern, bytes):
new_pattern = re.compile(b'([*?[])').sub(br'[\1]', new_pattern)
else:
new_pattern = re.compile('([*?[])').sub(r'[\1]', new_pattern)
pattern = new_pattern + pattern
files = []
for root, __, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, pattern):
files.append(os.path.join(root, filename))
if not subfolders:
break
if sort:
files = sorted(files, key=os.path.getsize, reverse=True)
return files
@staticmethod
def _rar_basename(file_path, files):
"""Return the lowercase basename of the source rar archive if found."""
videofile = os.path.basename(file_path)
<CHANGES>
rars = (x for x in files if rarfile.is_rarfile(x))
<CHANGEE>
for rar in rars:
try:
content = rarfile.RarFile(rar).namelist()
except NeedFirstVolume:
continue
except RarError as error:
logger.log(u'An error occurred while reading the following RAR file: {name}. '
u'Error: {message}'.format(name=rar, message=error), logger.WARNING)
continue
if videofile in content:
return os.path.splitext(os.path.basename(rar))[0].lower()
def _delete(self, files, associated_files=False):
<FILEE>
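The loop above answers "which archive contains this video file?" while skipping unreadable volumes. The same pattern, sketched with the standard-library zipfile module so it runs without external data or the rarfile dependency; archive_basename and the demo file name are illustrative only.

import os
import zipfile

def archive_basename(member_name, candidates):
    # Return the lowercase basename of the first archive containing member_name.
    for path in candidates:
        if not zipfile.is_zipfile(path):
            continue
        try:
            with zipfile.ZipFile(path) as zf:
                names = zf.namelist()
        except zipfile.BadZipFile:
            continue  # unreadable archive: skip it, as the RAR loop logs and continues
        if member_name in names:
            return os.path.splitext(os.path.basename(path))[0].lower()
    return None

if __name__ == '__main__':
    with zipfile.ZipFile('Demo.S01E01.zip', 'w') as zf:
        zf.writestr('episode.mkv', b'')
    print(archive_basename('episode.mkv', ['Demo.S01E01.zip']))  # demo.s01e01
    os.remove('Demo.S01E01.zip')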
<SCANS> if any(char in new_pattern for char in ['[', '?', '*']):
# Escaping is done by wrapping any of "*?[" between square brackets.
# Modified from: https://hg.python.org/cpython/file/tip/Lib/glob.py#l161
"""Delete the file(s) and optionally all associated files."""
""":param files: path(s) to file(s) that should be deleted"""
""":param associated_files: True to delete all files which differ only by extension, False to leave them"""
gen_files = generate(files or [])
files = list(gen_files)
# also delete associated files, works only for 1 file
if associated_files and len(files) == 1:
files += self.list_associated_files(files[0], subfolders=True)
for filename in files:
if os.path.isfile(filename):
self.log(u'Deleting file: {0}'.format(filename), logger.DEBUG)
# check first the read-only attribute
file_attribute = os.stat(filename)[0]
if not file_attribute & stat.S_IWRITE:
# File is read-only, so make it writeable
self.log(u'Read only mode on file {0}. '
u'Will try to make it writeable'.format(filename),
logger.DEBUG)
try:
os.chmod(filename, stat.S_IWRITE)
except OSError as error:
self.log(
u'Cannot change permissions for {path}. '
u'Error: {msg}'.format(path=filename, msg=error),
logger.WARNING
)
os.remove(filename)
# do the library update for synoindex
notifiers.synoindex_notifier.deleteFile(filename)
@staticmethod
def rename_associated_file(new_path, new_basename, filepath):
"""Rename associated file using media basename."""
""":param new_path: full show folder path where the file will be moved|copied|linked to"""
""":param new_basename: the media base filename (no extension) to use during the rename"""
""":param filepath: full path of the associated file"""
""":return: renamed full file path"""
# file extension without leading dot
extension = helpers.get_extension(filepath)
# initially set current extension as new extension
new_extension = extension
# replace nfo with nfo-orig to avoid conflicts
if extension == 'nfo' and app.NFO_RENAME:
new_extension = 'nfo-orig'
elif is_subtitle(filepath):
split_path = filepath.rsplit('.', 2)
# len != 3 means we have a subtitle without language
if len(split_path) == 3:
sub_code = split_path[1]
code = sub_code.lower().replace('_', '-')
if from_code(code, unknown
<FILEB>
<CHANGES>
self.ui = uic.loadUi(os.path.join(priv_dir,'mesonrunner.ui'))
<CHANGEE>
<FILEE>
<FILEB>
else:
ns = True
self.coredata.strip = ns
def coverage_changed(self, newState):
if newState == 0:
ns = False
else:
ns = True
self.coredata.coverage = ns
def pch_changed(self, newState):
if newState == 0:
ns = False
else:
ns = True
self.coredata.use_pch = ns
class ProcessRunner():
def __init__(self, rundir, cmdlist):
self.cmdlist = cmdlist
<CHANGES>
self.ui = uic.loadUi('mesonrunner.ui')
<CHANGEE>
self.timer = QTimer(self.ui)
self.timer.setInterval(1000)
self.timer.timeout.connect(self.timeout)
self.process = PyQt5.QtCore.QProcess()
self.process.setProcessChannelMode(PyQt5.QtCore.QProcess.MergedChannels)
self.process.setWorkingDirectory(rundir)
self.process.readyRead.connect(self.read_data)
self.process.finished.connect(self.finished)
self.ui.termbutton.clicked.connect(self.terminated)
self.return_value = 100
def run(self):
self.process.start(self.cmdlist[0], self.cmdlist[1:])
<FILEE>
<SCANS>entry.text()
builddir = self.ui.build_entry.text()
cross = self.ui.cross_entry.text()
cmdlist = [os.path.join(os.path.split(__file__)[0], 'meson.py'), srcdir, builddir]
if cross != '':
cmdlist += ['--cross', cross]
pr = ProcessRunner(os.getcwd(), cmdlist)
rvalue = pr.run()
if rvalue == 0:
os.execl(__file__, 'dummy', builddir)
def update_button(self):
if self.ui.source_entry.text() == '' or self.ui.build_entry.text() == '':
self.ui.generate_button.setEnabled(False)
else:
self.ui.generate_button.setEnabled(True)
def src_browse_clicked(self):
self.dialog.setFileMode(2)
if self.dialog.exec():
self.ui.source_entry.setText(self.dialog.selectedFiles()[0])
def build_browse_clicked(self):
self.dialog.setFileMode(2)
if self.dialog.exec():
self.ui.build_entry.setText(self.dialog.selectedFiles()[0])
def cross_browse_clicked(self):
self.dialog.setFileMode(1)
if self.dialog.exec():
self.ui.cross_entry.setText(self.dialog.selectedFiles()[0])
if __name__ == '__main__':
app = QApplication(sys.argv)
if len(sys.argv) == 1:
arg = ""
elif len(sys.argv) == 2:
arg = sys.argv[1]
else:
print(sys.argv[0], "<build or source dir>")
sys.exit(1)
if os.path.exists(os.path.join(arg, 'meson-private/coredata.dat')):
gui = MesonGui(arg)
else:
runner = Starter(arg)
sys.exit(app.exec_())
<FILEB>
<CHANGES>
index = self.data.index(finfo)
<CHANGEE>
<FILEE>
<FILEB>
editor.setFocus()
def new(self, filename, encoding, text):
"""Create new filename with *encoding* and *text*"""
finfo = self.create_new_editor(filename, encoding, text,
set_current=False, new=True)
finfo.editor.set_cursor_position('eof')
finfo.editor.insert_text(os.linesep)
return finfo
def load(self, filename, set_current=True):
"""Load filename, create an editor instance and return it"""
"""*Warning* This is loading file, creating editor but not executing"""
"""the source code analysis -- the analysis must be done by the editor"""
"""plugin (in case multiple editorstack instances are handled)"""
filename = osp.abspath(unicode(filename))
self.emit(SIGNAL('starting_long_process(QString)'),
_("Loading %s...") % filename)
text, enc = encoding.read(filename)
finfo = self.create_new_editor(filename, enc, text, set_current)
<CHANGES>
index = self.get_stack_index()
<CHANGEE>
self._refresh_outlineexplorer(index, update=True)
self.emit(SIGNAL('ending_long_process(QString)'), "")
if self.isVisible() and self.checkeolchars_enabled \
and sourcecode.has_mixed_eol_chars(text):
name = osp.basename(filename)
QMessageBox.warning(self, self.title,
_("<b>%s</b> contains mixed end-of-line "
"characters.<br>Spyder will fix this "
"automatically.") % name,
QMessageBox.Ok)
self.set_os_eol_chars(index)
self.is_analysis_done = False
<FILEE>
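The load path above warns when a file mixes end-of-line characters. A minimal, self-contained sketch of what such a check can look like; Spyder's own sourcecode.has_mixed_eol_chars may differ in detail.

def has_mixed_eol_chars(text):
    # Count CRLF first, then bare CR and bare LF that are not part of a CRLF pair.
    crlf = text.count('\r\n')
    cr = text.count('\r') - crlf
    lf = text.count('\n') - crlf
    return sum(1 for n in (crlf, cr, lf) if n > 0) > 1

assert not has_mixed_eol_chars('a\nb\nc\n')
assert has_mixed_eol_chars('a\r\nb\nc\n')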
<SCANS>explorer')
self.outlineexplorer_enabled = state
def set_default_font(self, font, color_scheme=None):
# get_font(self.CONF_SECTION)
self.default_font = font
if color_scheme is not None:
self.color_scheme = color_scheme
if self.data:
for finfo in self.data:
finfo.editor.set_font(font, color_scheme)
def set_color_scheme(self, color_scheme):
self.color_scheme = color_scheme
if self.data:
for finfo in self.data:
finfo.editor.set_color_scheme(color_scheme)
def set_wrap_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'wrap')
self.wrap_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.toggle_wrap_mode(state)
def set_tabmode_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'tab_always_indent')
self.tabmode_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_tab_mode(state)
def set_intelligent_backspace_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'intelligent_backspace')
self.intelligent_backspace_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.toggle_intelligent_backspace(state)
def set_occurence_highlighting_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'occurence_highlighting')
self.occurence_highlighting_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_occurence_highlighting(state)
def set_occurence_highlighting_timeout(self, timeout):
# CONF.get(self.CONF_SECTION, 'occurence_highlighting/timeout')
self.occurence_highlighting_timeout = timeout
if self.data:
for finfo in self.data:
finfo.editor.set_occurence_timeout(timeout)
def set_highlight_current_line_enabled(self, state):
self.highlight_current_line_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_highlight_current_line(state)
def set_checkeolchars_enabled(self, state):
# CONF.get(self.CONF_SECTION
<FILEB>
<CHANGES>
from ._compat import BROKEN_PYPY_CTXMGR_EXIT, reraise
<CHANGEE>
<FILEE>
<FILEB>
# -*- coding: utf-8 -*-
"""flask.ctx"""
"""~~~~~~~~~"""
"""Implements the objects required to keep the context."""
""":copyright: (c) 2014 by Armin Ronacher."""
""":license: BSD, see LICENSE for more details."""
from __future__ import with_statement
import sys
from functools import update_wrapper
from werkzeug.exceptions import HTTPException
from .globals import _request_ctx_stack, _app_ctx_stack
from .module import blueprint_is_module
from .signals import appcontext_pushed, appcontext_popped
<CHANGES>
from ._compat import BROKEN_PYPY_CTXMGR_EXIT
<CHANGEE>
class _AppCtxGlobals(object):
"""A plain object."""
def get(self, name, default=None):
return self.__dict__.get(name, default)
def __contains__(self, item):
return item in self.__dict__
def __iter__(self):
return iter(self.__dict__)
def __repr__(self):
top = _app_ctx_stack.top
if top is not None:
return '<flask.g of %r>' % top.app.name
<FILEE>
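A tiny usage sketch of the _AppCtxGlobals-style namespace shown above: attributes live in __dict__, so get(), membership tests, and iteration behave like a dict view. The Namespace class here is a stand-in for illustration, not Flask's object.

class Namespace(object):
    def get(self, name, default=None):
        return self.__dict__.get(name, default)
    def __contains__(self, item):
        return item in self.__dict__
    def __iter__(self):
        return iter(self.__dict__)

ns = Namespace()
ns.user = 'alice'
print(ns.get('user'))          # 'alice'
print(ns.get('missing', 42))   # 42
print('user' in ns)            # True
print(list(ns))                # ['user']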
<SCANS> """self.remote_addr = remote_addr"""
""".. versionadded:: 0.7"""
return _request_ctx_stack.top is not None
def has_app_context():
"""Works like :func:`has_request_context` but for the application"""
"""context. You can also just do a boolean check on the"""
""":data:`current_app` object instead."""
""".. versionadded:: 0.9"""
return _app_ctx_stack.top is not None
class AppContext(object):
"""The application context binds an application object implicitly"""
"""to the current thread or greenlet, similar to how the"""
""":class:`RequestContext` binds request information. The application"""
"""context is also implicitly created if a request context is created"""
"""but the application is not on top of the individual application"""
"""context."""
def __init__(self, app):
self.app = app
self.url_adapter = app.create_url_adapter(None)
self.g = app.app_ctx_globals_class()
# Like request context, app contexts can be pushed multiple times
# but there a basic "refcount" is enough to track them.
self._refcnt = 0
def push(self):
"""Binds the app context to the current context."""
self._refcnt += 1
if hasattr(sys, 'exc_clear'):
sys.exc_clear()
_app_ctx_stack.push(self)
appcontext_pushed.send(self.app)
def pop(self, exc=None):
"""Pops the app context."""
self._refcnt -= 1
if self._refcnt <= 0:
if exc is None:
exc = sys.exc_info()[1]
self.app.do_teardown_appcontext(exc)
rv = _app_ctx_stack.pop()
assert rv is self, 'Popped wrong app context. (%r instead of %r)' \
% (rv, self)
appcontext_popped.send(self.app)
def __enter__(self):
self.push()
return self
def __exit__(self, exc_type, exc_value, tb):
self.pop(exc_value)
if BROKEN_PYPY_CTXMGR_EXIT and exc_type is not None:
reraise(exc_type, exc_value, tb)
class RequestContext(object):
"""The request context contains all request relevant information. It is"""
"""created at the beginning of the request and pushed to the"""
"""`_request_ctx_stack` and removed at the end of it. It will create the"""
"""URL adapter and request object for the WSGI environment provided."""
"""Do not attempt to use this class directly, instead use"""
""":meth:`~flask.Flask.test_request_context` and"""
""":meth:`~flask.Flask.request_context` to create this object."""
"""When the request context is popped, it will evaluate all the"""
"""functions registered on the application for teardown execution"""
"""(:meth:`~flask.Flask.teardown_request`)."""
"""The request context is automatically popped at the end of the request"""
"""for you. In debug mode the request context is kept around if"""
"""exceptions happen so that interactive
<FILEB>
<CHANGES>
if not cls.config.compute.allow_tenant_isolation:
<CHANGEE>
<FILEE>
<FILEB>
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from tempest.api import compute
from tempest.api.compute import base
from tempest import clients
from tempest import exceptions
from tempest.test import attr
class ListServersNegativeTestJSON(base.BaseComputeTest):
_interface = 'json'
@classmethod
def _ensure_no_servers(cls, servers, username, tenant_name):
"""If there are servers and there is tenant isolation then a"""
"""skipException is raised to skip the test since it requires no servers"""
"""to already exist for the given user/tenant."""
"""If there are servers and there is not tenant isolation then the test"""
"""blocks while the servers are being deleted."""
if len(servers):
<CHANGES>
if not compute.MULTI_USER:
<CHANGEE>
for srv in servers:
cls.client.wait_for_server_termination(srv['id'],
ignore_error=True)
else:
msg = ("User/tenant %(u)s/%(t)s already have "
"existing server instances. Skipping test." %
{'u': username, 't': tenant_name})
raise cls.skipException(msg)
@classmethod
def setUpClass(cls):
super(ListServersNegativeTestJSON, cls).setUpClass()
cls.client = cls.servers_client
<FILEE>
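A generic "skip or block" sketch of the policy described in the docstring above: with isolated credentials a pre-existing server means the test is skipped, while without isolation the test simply waits for the leftovers to disappear. ensure_no_servers and wait_for_termination are illustrative stand-ins, not tempest's API.

import unittest

def ensure_no_servers(servers, isolated, wait_for_termination):
    if not servers:
        return
    if isolated:
        raise unittest.SkipTest('existing servers found for isolated tenant')
    for srv in servers:
        wait_for_termination(srv['id'])

# Example: no isolation, so the two leftovers are just waited out.
ensure_no_servers([{'id': 1}, {'id': 2}], False, lambda sid: None)
print('leftover servers waited out')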
<SCANS># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
cls.servers = []
if compute.MULTI_USER:
if cls.config.compute.allow_tenant_isolation:
creds = cls.isolated_creds.get_alt_creds()
username, tenant_name, password = creds
cls.alt_manager = clients.Manager(username=username,
password=password,
tenant_name=tenant_name)
else:
# Use the alt_XXX credentials in the config file
cls.alt_manager = clients.AltManager()
cls.alt_client = cls.alt_manager.servers_client
# Under circumstances when there is not a tenant/user
# created for the test case, the test case checks
# to see if there are existing servers for
# either the normal user/tenant or the alt user/tenant
# and if so, the whole test is skipped. We do this
# because we assume a baseline of no servers at the
# start of the test instead of destroying any existing
# servers.
resp, body = cls.client.list_servers()
cls._ensure_no_servers(body['servers'],
cls.os.username,
cls.os.tenant_name)
resp, body = cls.alt_client.list_servers()
cls._ensure_no_servers(body['servers'],
cls.alt_manager.username,
cls.alt_manager.tenant_name)
# The following servers are created for use
# by the test methods in this class. These
# servers are cleaned up automatically in the
# tearDownClass method of the super-class.
cls.existing_fixtures = []
cls.deleted_fixtures = []
cls.start_time = datetime.datetime.utcnow()
for x in xrange(2):
resp, srv = cls.create_server()
cls.existing_fixtures.append(srv)
resp, srv = cls.create_server()
cls.client.delete_server(srv['id'])
# We ignore errors on termination because the server may
# be put into ERROR status on a quick spawn, then delete,
# as the compute node expects the instance local status
# to be spawning, not deleted. See LP Bug#1061167
cls.client.wait_for_server_termination(srv['id'],
ignore_error=True)
cls.deleted_fixtures.append(srv)
@attr(type=['negative', 'gate'])
def test_list_servers_with_a_deleted_server
<FILEB>
<CHANGES>
if (isinstance(v.dtype, type) and issubclass(v.dtype, basestring)) or v.dtype.char == 'S':
<CHANGEE>
<FILEE>
<FILEB>
"""(except for boundary variables defined in Section 7.1, "Cell Boundaries" and climatology variables"""
"""defined in Section 7.4, "Climatological Statistics")."""
"""Units are not required for dimensionless quantities. A variable with no units attribute is assumed"""
"""to be dimensionless. However, a units attribute specifying a dimensionless unit may optionally be"""
"""included."""
"""- units required"""
"""- type must be recognized by udunits"""
"""- if std name specified, must be consistent with standard name table, must also be consistent with a"""
"""specified cell_methods attribute if present"""
ret_val = []
deprecated = ['level', 'layer', 'sigma_level']
for k, v in ds.dataset.variables.iteritems():
# skip climatological vars, boundary vars
if v in self._find_clim_vars(ds) or \
v in self._find_boundary_vars(ds).itervalues() or \
v.shape == ():
continue
# skip string type vars
<CHANGES>
if v.dtype.char == 'S':
<CHANGEE>
continue
# skip quality control vars
if hasattr(v, 'flag_meanings'):
continue
if hasattr(v, 'standard_name') and 'status_flag' in v.standard_name:
continue
# skip DSG cf_role
if hasattr(v, "cf_role"):
continue
units = getattr(v, 'units', None)
# 1) "units" attribute must be present
presence = Result(BaseCheck.HIGH, units is not None, ('units', k, 'present'))
<FILEE>
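A minimal sketch of the cascading units checks performed above: presence, string type, not deprecated, then recognizable. units_known here is a stand-in for a real udunits lookup and only knows a handful of strings; the real checker returns Result objects rather than tuples.

DEPRECATED = ['level', 'layer', 'sigma_level']

def units_known(units):
    return units in ('m', 'kg', 'm s-1', 'K', '1')

def check_units(units):
    # Return (passed, message) for the first failing check, or (True, 'ok').
    if units is None:
        return False, 'units attribute required'
    if not isinstance(units, str):
        return False, 'units not a string (%s)' % type(units)
    if units in DEPRECATED:
        return False, 'units (%s) is deprecated' % units
    if not units_known(units):
        return False, 'unknown units type (%s)' % units
    return True, 'ok'

print(check_units(None))           # (False, 'units attribute required')
print(check_units('sigma_level'))  # (False, 'units (sigma_level) is deprecated')
print(check_units('m s-1'))        # (True, 'ok')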
<SCANS>
@score_group('convention_attrs')
def check_convention_possibly_var_attrs(self, ds):
"""2.6.2 institution, source, references, and comment, either global or assigned to individual variables."""
"""When an attribute appears both globally and as a variable attribute, the variable's version has precedence."""
"""Must be strings."""
attrs = ['institution', 'source', 'references', 'comment']
ret = []
# check attrs on global ds
# can't predetermine total - we only report attrs we find
for k, v in ds.dataset.variables.iteritems():
vattrs = v.ncattrs()
for a in attrs:
if a in vattrs:
ret.append(Result(BaseCheck.HIGH, isinstance(getattr(v, a), basestring), (k, a)))
return ret
###############################################################################
#
# CHAPTER 3: Description of the Data
#
###############################################################################
def check_units(self, ds):
"""3.1 The units attribute is required for all variables that represent dimensional quantities"""
if not presence.value:
presence.msgs = ['units attribute required']
ret_val.append(presence)
continue
# 2) units attribute must be a string
astring = Result(BaseCheck.HIGH, isinstance(units, basestring), ('units', k, 'string'))
if not astring.value:
astring.msgs = ["units not a string (%s)" % type(units)]
ret_val.append(astring)
continue
# now, units are present and string
# 3) units are not deprecated
resdeprecated = Result(BaseCheck.LOW, not units in deprecated, ('units', k, 'deprecated'))
if not resdeprecated.value:
resdeprecated.msgs = ['units (%s) is deprecated' % units]
ret_val.append(resdeprecated)
continue
# 4) units are known
knownu = Result(BaseCheck.HIGH, units_known(units), ('units', k, 'known'))
if not knownu.value:
knownu.msgs = ['unknown units type (%s)' % units]
ret_val.append(knownu)
#continue
# units look ok so far, check against standard name / cell methods
std_name = getattr(v, 'standard_name', None)
std_name_modifier = None
if isinstance(std_name, basestring):
if ' ' in std_name:
std_name, std_name_modifier = std_name.split(' ', 1)
# if no standard name or cell_methods, nothing left to do
if std_name is None and not hasattr(v, 'cell_methods'):
#ret_val.append(Result(BaseCheck.HIGH, True, ('units', k, 'ok')))
continue
# 5) if a known std_name, use the units provided
if std_name is not None
<FILEB>
<CHANGES>
if currency.company_id.id!= company_id:
<CHANGEE>
<FILEE>
<FILEB>
}
}
if type in ('in_invoice', 'in_refund'):
result['value']['partner_bank'] = bank_id
if payment_term != partner_payment_term:
if partner_payment_term:
to_update = self.onchange_payment_term_date_invoice(
cr,uid,ids,partner_payment_term,date_invoice)
result['value'].update(to_update['value'])
else:
result['value']['date_due'] = False
if partner_bank_id != bank_id:
to_update = self.onchange_partner_bank(cr, uid, ids, bank_id)
result['value'].update(to_update['value'])
return result
def onchange_currency_id(self, cr, uid, ids, curr_id, company_id):
if curr_id:
currency = self.pool.get('res.currency').browse(cr, uid, curr_id)
<CHANGES>
if currency.company_id != company_id:
<CHANGEE>
raise osv.except_osv(_('Configuration Error !'),
_('Can not select currency that is not related to current company.\nPlease select accordingly !.'))
return {}
def onchange_payment_term_date_invoice(self, cr, uid, ids, payment_term_id, date_invoice):
if not payment_term_id:
return {}
res={}
pt_obj= self.pool.get('account.payment.term')
if not date_invoice :
date_invoice = time.strftime('%Y-%m-%d')
pterm_list = pt_obj.compute(cr, uid, payment_term_id, value=1, date_ref=date_invoice)
if pterm_list:
<FILEE>
<SCANS> if ids:
if company_id:
inv_obj = self.browse(cr,uid,ids)
for line in inv_obj[0].invoice_line:
if line.account_id:
if line.account_id.company_id.id != company_id:
result_id = self.pool.get('account.account').search(cr,uid,[('name','=',line.account_id.name),('company_id','=',company_id)])
if not result_id:
raise osv.except_osv(_('Configuration Error !'),
_('Can not find account chart for this company in invoice line account, Please Create account.'))
r_id = self.pool.get('account.invoice.line').write(cr,uid,[line.id],{'account_id': result_id[0]})
else:
if invoice_line:
for inv_line in invoice_line:
obj_l = self.pool.get('account.account').browse(cr,uid,inv_line[2]['account_id'])
if obj_l.company_id.id != company_id:
raise osv.except_osv(_('Configuration Error !'),
_('invoice line account company is not match with invoice company.'))
else:
continue
if company_id:
val['journal_id']=False
journal_ids=self.pool.get('account.journal').search(cr,uid,[('company_id','=',company_id)])
dom={'journal_id': [('id','in',journal_ids)]}
else:
journal_ids=self.pool.get('account.journal').search(cr,uid,[])
dom={'journal_id': [('id','in',journal_ids)]}
return {'value' : val, 'domain': dom }
# go from canceled state to draft state
def action_cancel_draft(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {'state':'draft'})
wf_service = netsvc.LocalService("workflow")
for inv_id in ids:
wf_service.trg_create(uid, 'account.invoice', inv_id, cr)
return True
# Workflow stuff
#################
# return the ids of the move lines which has the same account than the invoice
# whose id is in ids
def move_line_id_payment_get(self, cr, uid, ids, *args):
res = []
if not ids: return res
cr.execute('select \
l.id \
from account_move_line l \
left join account_invoice i on (i.move_id=l.move_id) \
where i.id in ('+','.
<FILEB>
<CHANGES>
context, instance, legacy=False)
<CHANGEE>
<FILEE>
<FILEB>
cleaned_keys = dict(
key_data=instance.key_data,
auto_disk_config=instance.auto_disk_config)
instance.key_data = None
instance.auto_disk_config = False
return cleaned_keys
def _unshelve_instance_key_restore(self, instance, keys):
"""Restore previously scrubbed keys before saving the instance."""
instance.update(keys)
def _unshelve_instance(self, context, instance, image):
self._notify_about_instance_usage(context, instance, 'unshelve.start')
compute_info = self._get_compute_info(context, self.host)
instance.task_state = task_states.SPAWNING
instance.node = compute_info['hypervisor_hostname']
instance.host = self.host
instance.save()
network_info = self._get_instance_nw_info(context, instance)
bdms = self.conductor_api.block_device_mapping_get_all_by_instance(
<CHANGES>
context, instance)
<CHANGEE>
block_device_info = self._prep_block_device(context, instance, bdms)
scrubbed_keys = self._unshelve_instance_key_scrub(instance)
try:
self.driver.spawn(context, instance, image, injected_files=[],
admin_password=None,
network_info=network_info,
block_device_info=block_device_info)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_('Instance failed to spawn'), instance=instance)
if image:
image_service = glance.get_default_image_service()
<FILEE>
<SCANS> for ip in vif.fixed_ips():
if ip['version'] == 4:
update_data['access_ip_v4'] = ip['address']
if ip['version'] == 6:
update_data['access_ip_v6'] = ip['address']
return
if set_access_ip:
_set_access_ip_values()
if network_info is not None:
network_info.wait(do_raise=True)
return self._instance_update(context, instance['uuid'],
**update_data)
def _notify_about_instance_usage(self, context, instance, event_suffix,
network_info=None, system_metadata=None,
extra_usage_info=None):
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, event_suffix,
network_info=network_info,
system_metadata=system_metadata,
extra_usage_info=extra_usage_info)
def _deallocate_network(self, context, instance,
requested_networks=None):
LOG.debug(_('Deallocating network for instance'), instance=instance)
self.network_api.deallocate_for_instance(
context, instance, requested_networks=requested_networks)
def _get_volume_bdms(self, bdms, legacy=True):
"""Return only bdms that have a volume_id."""
if legacy:
return [bdm for bdm in bdms if bdm['volume_id']]
else:
return [bdm for bdm in bdms
if bdm['destination_type'] == 'volume']
# NOTE(danms): Legacy interface for digging up volumes in the database
def _get_instance_volume_bdms(self, context, instance, legacy=True):
if isinstance(instance, instance_obj.Instance):
instance = obj_base.obj_to_primitive(instance)
return self._get_volume_bdms(
self.conductor_api.block_device_mapping_get_all_by_instance(
context, instance, legacy), legacy)
def _get_instance_volume_bdm(self, context, instance, volume_id):
bdms = self._get_instance_volume_bdms(context, instance)
for bdm in bdms:
# NOTE(vish): Comparing as strings because the os_api doesn't
# convert to integer and we may wish to support uuids
# in the future.
if str(bdm['volume_id']) == str(volume_id):
return bdm
def _get_instance_volume_block_device_info(self, context, instance,
refresh_conn_info=False):
"""
<FILEB>
<CHANGES>
no_punct = ''.join([c for c in title.lower() if c.isalnum() or c in [" ", "_", "-"]])
<CHANGEE>
<FILEE>
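A self-contained restatement of the helper around the change above: the filtered characters must be joined back into a string with ''.join (a list has no .replace), then separators are normalized to underscores and trailing underscores stripped. title_to_filename is an illustrative name.

def title_to_filename(title):
    no_punct = ''.join(c for c in title.lower()
                       if c.isalnum() or c in (' ', '_', '-'))
    return no_punct.replace(' ', '_').replace('-', '_').strip('_')

assert title_to_filename('My Dashboard: CPU / RAM') == 'my_dashboard_cpu__ram'
assert title_to_filename('-- edge case --') == 'edge_case'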
<FILEB>
help='When pushing to the server, appends filename and timestamp to the end of the dashboard description')
push_parser.add_argument('file', help='dashboard files to push to the server', nargs='+', type=argparse.FileType('r'))
push_parser.set_defaults(func=self._push)
new_file_parser = verb_parsers.add_parser('new_file', help='Create a new dashboard and put its contents in a file')
new_file_parser.add_argument('filename', help='name of file to create with empty dashboard')
new_file_parser.set_defaults(func=self._new_file)
web_view_parser = verb_parsers.add_parser('web_view', help='View the dashboard in a web browser')
web_view_parser.add_argument('file', help='dashboard file', type=argparse.FileType('r'))
web_view_parser.set_defaults(func=self._web_view)
delete_parser = verb_parsers.add_parser('delete', help='Delete dashboards.')
delete_parser.add_argument('dashboard_id', help='dashboard to delete')
delete_parser.set_defaults(func=self._delete)
def _pull(self, args):
self._write_dash_to_file(args.dashboard_id, args.filename, args.timeout, args.format, args.string_ids)
def _pull_all(self, args):
self.dog.timeout = args.timeout
def _title_to_filename(title):
# Get a lowercased version with most punctuation stripped out...
<CHANGES>
no_punct = [c for c in title.lower() if c.isalnum() or c in [" ", "_", "-"]]
<CHANGEE>
# Now replace all -'s, _'s and spaces with "_", and strip trailing _
return no_punct.replace(" ", "_").replace("-", "_").strip("_")
format = args.format
res = self.dog.dashboards()
report_warnings(res)
report_errors(res)
if not os.path.exists(args.pull_dir):
os.mkdir(args.pull_dir, 0o755)
used_filenames = set()
<SCANS>Pull a dashboard on the server into a local file')
pull_parser.add_argument('dashboard_id', help='ID of dashboard to pull')
pull_parser.add_argument('filename', help='file to pull dashboard into') # , type=argparse.FileType('wb'))
pull_parser.set_defaults(func=self._pull)
pull_all_parser = verb_parsers.add_parser('pull_all', help='Pull all dashboards into files in a directory')
pull_all_parser.add_argument('pull_dir', help='directory to pull dashboards into')
pull_all_parser.set_defaults(func=self._pull_all)
push_parser = verb_parsers.add_parser('push', help='Push updates to dashboards from local files to the server')
push_parser.add_argument('--append_auto_text', action='store_true', dest='append_auto_text',
filename = filename + "-" + dash_summary['id']
used_filenames.add(filename)
self._write_dash_to_file(dash_summary['id'],
os.path.join(args.pull_dir, filename + ".json"),
args.timeout,
format,
args.string_ids)
if format == 'pretty':
print(("\n### Total: {0} dashboards to {1} ###"
.format(len(used_filenames), os.path.realpath(args.pull_dir))))
def _new_file(self, args):
self.dog.timeout = args.timeout
format = args.format
res = self.dog.create_dashboard(args.filename,
"Description for {0}".format(args.filename), [])
report_warnings(res)
report_errors(res)
self._write_dash_to_file(res['dash']['id'], args.filename, args.timeout, format, args.string_ids)
if format == 'pretty':
print(self._pretty_json(res))
else:
print(json.dumps(res))
def _write_dash_to_file(self, dash_id, filename, timeout, format='raw', string_ids=False):
with open(filename, "w") as f:
res
<FILEB>
<CHANGES>
'video_id' : video.video_id,
<CHANGEE>
<FILEE>
<FILEB>
try:
video = models.Video.objects.get(youtube_videoid=youtube_videoid)
except models.Video.DoesNotExist:
video = models.Video(video_type=models.VIDEO_TYPE_YOUTUBE,
youtube_videoid=youtube_videoid,
allow_community_edits=True)
video.save()
else:
try:
video = models.Video.objects.get(video_url=video_url)
except models.Video.DoesNotExist:
video = models.Video(video_type=models.VIDEO_TYPE_HTML5,
video_url=video_url,
allow_community_edits=True)
video.save()
video.widget_views_count += 1
video.save()
return_value = {
<CHANGES>
'video_id' : video.id,
<CHANGEE>
'writelock_expiration' : models.WRITELOCK_EXPIRATION
}
# video_tab corresponds to mirosubs.widget.VideoTab.InitialState in
# javascript.
video_tab = 0
if null_widget:
null_captions = None
if request.user.is_authenticated:
null_captions = video.null_captions(request.user)
translation_language_codes = \
video.null_translation_language_codes(request.user)
else:
<FILEE>
<SCANS> edits, or i can freely edit it."""
maybe_add_video_session(request)
video = models.Video.objects.get(video_id=video_id)
if (not video.allow_community_edits and
video.owner != None and (request.user.is_anonymous() or
video.owner.pk != request.user.pk)):
return { "can_edit": False, "owned_by" : video.owner.username }
if not video.can_writelock(request):
return { "can_edit": False, "locked_by" : video.writelock_owner_name }
video.writelock(request)
video.save()
latest_captions = video.captions()
if latest_captions is None:
new_version_no = 0
existing_captions = []
else:
new_version_no = latest_captions.version_no + 1
existing_captions = list(latest_captions.videocaption_set.all())
return { "can_edit" : True,
"version" : new_version_no,
"existing" : [caption.to_json_dict() for
caption in existing_captions] }
def start_editing_null(request, video_id):
# FIXME: note duplication with start_editing, fix that.
if not request.user.is_authenticated():
captions = []
else:
video = models.Video.objects.get(video_id=video_id)
null_captions = video.null_captions(request.user)
if null_captions is None:
captions = []
else:
captions = list(null_captions.videocaption_set.all())
return { 'can_edit': True,
'version': 0,
'existing': [caption.to_json_dict() for
caption in captions] }
def start_translating(request, video_id, language_code, editing=False):
"""Called by widget whenever translating is about to commence or recommence."""
maybe_add_video_session(request)
video = models.Video.objects.get(video_id=video_id)
translation_language = video.translation_language(language_code)
if translation_language == None:
translation_language = models.TranslationLanguage(
video=video,
language=language_code,
writelock_session_key='')
translation_language.save()
# TODO: note duplication with start_editing. Figure out a way to fix this.
if not translation_language.can_writelock(request):
return { "can_edit": False,
"locked_by" : video.writelock_owner_name }
translation_language.writelock(request)
translation_language.save()
latest_translations = translation_language.translations()
if latest_translations is None:
new_version_no = 0
existing_translations = []
else:
new_version_no = latest_translations.version_no + 1
existing_translations = list(latest_translations.translation_set.
<FILEB>
<CHANGES>
return [f.replace(config.build_prefix + os.sep, '') for root, _, _ in os.walk(config.info_dir)
<CHANGEE>
<FILEE>
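A small sketch of why the prefix is stripped with os.sep rather than a hard-coded '/' in the change above: on Windows the walked paths use backslashes, so the '/' variant would leave the prefix in place. The paths below are illustrative only.

import os

def strip_prefix(path, prefix):
    return path.replace(prefix + os.sep, '')

prefix = os.path.join('opt', 'prefix')
path = os.path.join('opt', 'prefix', 'info', 'recipe', 'meta.yaml')
print(strip_prefix(path, prefix))  # info/recipe/meta.yaml (info\recipe\meta.yaml on Windows)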
<FILEB>
copy_recipe(m, config)
copy_readme(m, config)
copy_license(m, config)
write_info_json(m, config) # actually index.json
write_about_json(m, config)
write_package_metadata_json(m, config)
write_info_files_file(m, files, config)
files_with_prefix = get_files_with_prefix(m, files, prefix)
create_info_files_json_v1(m, config.info_dir, prefix, files, files_with_prefix)
detect_and_record_prefix_files(m, files, prefix, config)
write_no_link(m, config, files)
if m.get_value('source/git_url'):
with io.open(join(config.info_dir, 'git'), 'w', encoding='utf-8') as fo:
source.git_info(config, fo)
if m.get_value('app/icon'):
utils.copy_into(join(m.path, m.get_value('app/icon')),
join(config.info_dir, 'icon.png'),
config.timeout, locking=config.locking)
<CHANGES>
return [f.replace(config.build_prefix + '/', '') for root, _, _ in os.walk(config.info_dir)
<CHANGEE>
for f in glob(os.path.join(root, '*'))]
def get_short_path(m, target_file):
entry_point_script_names = get_entry_point_script_names(m.get_value('build/entry_points'))
if is_noarch_python(m):
if target_file.find("site-packages") > 0:
return target_file[target_file.find("site-packages"):]
elif target_file.startswith("bin") and (target_file not in entry_point_script_names):
return target_file.replace("bin", "python-scripts")
elif target_file.startswith("Scripts") and (target_file not in entry_point_script_names):
return target_file.replace("Scripts", "python-scripts")
elif m.get_value('build<SCANS> windows
windows.build(m, build_file, config=config)
else:
build_file = join(m.path, 'build.sh')
# There is no sense in trying to run an empty build script.
if isfile(build_file) or script:
with utils.path_prepended(config.build_prefix):
env = environ.get_dict(config=config, m=m)
env["CONDA_BUILD_STATE"] = "BUILD"
work_file = join(config.work_dir, 'conda_build.sh')
if script:
with open(work_file, 'w') as bf:
bf.write(script)
if config.activate:
if isfile(build_file):
data = open(build_file).read()
else:
data = open(work_file).read()
with open(work_file, 'w') as bf:
bf.write('source "{conda_root}activate" "{build_prefix}" &> '
'/dev/null\n'.format(conda_root=utils.root_script_dir +
os.path.sep,
build_prefix=config.build_prefix))
bf.write(data)
else:
if not isfile(work_file):
utils.copy_into(build_file, work_file, config.timeout,
locking=config.locking)
os.chmod(work_file, 0o766)
if isfile(work_file):
cmd = [shell_path, '-x', '-e', work_file]
# this should raise if any problems occur while building
utils.check_call_env(cmd, env=env, cwd=src_dir)
if post in [True, None]:
if post:
with open(join(config.croot, 'prefix_files.txt'), 'r') as f:
files1 = set(f.read().splitlines())
get_build_metadata(m, config=config)
create_post_scripts(m, config=config)
if not is_noarch_python(m):
utils.create_entry_points(m.get_value('build/entry_points'), config=config)
files2 = prefix_
<FILEB>
<CHANGES>
self.assertContains(response, b"Currently")
<CHANGEE>
<FILEE>
<FILEB>
self.client.logout()
def test_inline_file_upload_edit_validation_error_post(self):
"""Test that inline file uploads correctly display prior data (#10002)."""
post_data = {
"name": "Test Gallery",
"pictures-TOTAL_FORMS": "2",
"pictures-INITIAL_FORMS": "1",
"pictures-MAX_NUM_FORMS": "0",
"pictures-0-id": six.text_type(self.picture.id),
"pictures-0-gallery": six.text_type(self.gallery.id),
"pictures-0-name": "Test Picture",
"pictures-0-image": "",
"pictures-1-id": "",
"pictures-1-gallery": str(self.gallery.id),
"pictures-1-name": "Test Picture 2",
"pictures-1-image": "",
}
response = self.client.post('/test_admin/%s/admin_views/gallery/%d/' % (self.urlbit, self.gallery.id), post_data)
<CHANGES>
self.assertTrue(response._container[0].find("Currently:") > -1)
<CHANGEE>
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class AdminInlineTests(TestCase):
urls = "admin_views.urls"
fixtures = ['admin-views-users.xml']
def setUp(self):
self.post_data = {
"name": "Test Name",
"widget_set-TOTAL_FORMS": "3",
"widget_set-INITIAL_FORMS": "0",
"widget_set-MAX_NUM_FORMS": "0",
"widget_set-0-id": "",
"widget_set-0-owner": "1",
<FILEE>
<SCANS> admin
post_data = {
"author": "John Doe II",
"_save": "Save",
}
response = self.client.post('/test_admin/admin/admin_views/coverletter/%s/' % cl.pk,
post_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(CoverLetter.objects.count(), 1)
# Message should contain non-ugly model verbose name. Instance
# representation is set by model's __unicode__()
self.assertContains(
response,
'<li class="success">The cover letter "John Doe II" was changed successfully.</li>',
html=True
)
# model has no __unicode__ method
sm = ShortMessage.objects.create(content="This is expensive")
self.assertEqual(ShortMessage.objects.count(), 1)
response = self.client.get('/test_admin/admin/admin_views/shortmessage/%s/' % sm.pk)
self.assertEqual(response.status_code, 200)
# Emulate model instance edit via the admin
post_data = {
"content": "Too expensive",
"_save": "Save",
}
response = self.client.post('/test_admin/admin/admin_views/shortmessage/%s/' % sm.pk,
post_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(ShortMessage.objects.count(), 1)
# Message should contain non-ugly model verbose name. The ugly(!)
# instance representation is set by six.text_type()
self.assertContains(
response,
'<li class="success">The short message "ShortMessage_Deferred_timestamp object" was changed successfully.</li>',
html=True
)
def test_edit_model_modeladmin_only_qs(self):
# Test for #14529. only() is used in ModelAdmin.get_queryset()
# model has __unicode__ method
t = Telegram.objects.create(title="Frist Telegram")
self.assertEqual(Telegram.objects.count(), 1)
response = self.client.get('/test_admin/admin/admin_views/telegram/%s/' % t.pk)
self.assertEqual(response.status_code, 200)
# Emulate model instance edit via the admin
post_data = {
"title": "Telegram without typo",
"_save": "Save",
}
response = self.client.post('/test_admin/admin/admin_views/telegram/%s/' %
<FILEB>
<CHANGES>
return self.table.c.laufnr.in_(subquery)
<CHANGEE>
<FILEE>
<FILEB>
if not document_filter.signature:
return None
signature = document_filter.signature.upper()
return or_(self.table.c.standort == signature,
self.table.c.standort.startswith("%s." % signature))
def _build_filetype_expression(self, document_filter):
'''Creates a filetype expression.'''
if not document_filter.filetype:
return None
subquery = select([self.table.c.hauptnr]).\
where(self.table.c.dateityp == document_filter.filetype)
return self.table.c.hauptnr.in_(subquery)
def _build_document_type_expression(self, document_filter):
'''Creates a document type expression.'''
if not document_filter.document_type:
return None
subquery = select([self.table.c.hauptnr]).where(
self.table.c.doktyp == document_filter.document_type)
<CHANGES>
return self.table.c.hauptnr.in_(subquery)
<CHANGEE>
class DocumentDao(EntityDao):
'''Persistence for the Document domain entity.'''
@inject
def __init__(self,
db_engine: baseinjectorkeys.DB_ENGINE_KEY,
config: baseinjectorkeys.CONFIG_KEY,
creator_dao: baseinjectorkeys.CREATOR_DAO_KEY,
document_type_dao: baseinjectorkeys.DOCUMENT_TYPE_DAO_KEY,
creator_provider: baseinjectorkeys.CREATOR_PROVIDER_KEY):
# pylint: disable=too-many-arguments
super().__init__(db_engine, DOCUMENT_TABLE)
self.select_column = DOCUMENT_TABLE.c.hauptnr
<FILEE>
<SCANS>()
statistics.number_of_documents = self.get_count(self.table.c.hauptnr == self.table.c.laufnr)
for file_type in self.config.filetypes:
count = self.get_count(self.table.c.dateityp == file_type)
if count > 0:
statistics.number_of_files_by_type[file_type] = count
return statistics
def find(self, condition=None, page=None, page_size=1):
extended_expression = self.table.c.hauptnr == self.table.c.laufnr
if not condition is None:
extended_expression = and_(extended_expression, condition)
return super().find(extended_expression, page, page_size)
class DocumentFileInfoDao(EntityDao):
'''Dao for the document files. At the moment'''
'''needs to use the document table also used'''
'''by the document dao, because the data of these'''
'''two are mixed into one table.'''
@inject
def __init__(self,
db_engine: baseinjectorkeys.DB_ENGINE_KEY,
creator_provider: baseinjectorkeys.CREATOR_PROVIDER_KEY):
super().__init__(db_engine, DOCUMENT_TABLE)
self.creator_provider = creator_provider
self.table = DOCUMENT_TABLE
# pylint: disable=arguments-differ
def get_by_id(self, document_file_id):
query = select([self.table])\
.where(and_(self.table.c.laufnr == document_file_id,
self.table.c.seite != None))
return self._get_exactly_one(query)
def get_file_infos_for_document(self, document_id):
'''Gets a list of all the document files for a certain document.'''
query = select([self.table]).where(
and_(self.table.c.hauptnr == document_id,
self.table.c.seite != None)).\
order_by(self.table.c.seite)
return self._get_list(query)
def create_new_file_info(self, document_id, filetype=None, resolution=None):
'''Transaction wrapper method for _create_new_file_info.'''
return self.transactional(
self._create_new_file_info,
document_id, filetype, resolution)
def _create_new_file_info(self, document_id, filetype, resolution):
'''Creates a new entry into the document table for this document file.'''
page = self._get_next_page(document_id)
if page == 1:
file
<FILEB>
<CHANGES>
static_folder = pjoin(global_settings.applications_parent,
<CHANGEE>
<FILEE>
<FILEB>
path = urllib.unquote(request.env.path_info) or '/'
path = path.replace('\\', '/')
if path.endswith('/') and len(path) > 1:
path = path[:-1]
match = regex_url.match(path)
if not match:
invalid_url(routes)
request.raw_args = (match.group('s') or '')
if request.raw_args.startswith('/'):
request.raw_args = request.raw_args[1:]
if match.group('c') == 'static':
application = match.group('a')
version, filename = None, match.group('z').replace(' ', '_')
if not filename:
raise HTTP(404)
items = filename.split('/', 1)
if regex_version.match(items[0]):
version, filename = items
<CHANGES>
static_folder = pjoin(global_settings.application_parent,
<CHANGEE>
'applications', application, 'static')
static_file = os.path.abspath(pjoin(static_folder, filename))
if not static_file.startswith(static_folder):
invalid_url(routes)
return (static_file, version, environ)
else:
# ##################################################
# parse application, controller and function
# ##################################################
request.application = match.group('a') or routes.default_application
request.controller = match.group('c') or routes.default_controller
request.function = match.group('f') or routes.default_function
<FILEE>
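A minimal sketch of the idea behind the startswith check above: resolve the joined path and reject anything that escapes the static folder. Appending os.sep before the comparison is a small hardening not present in the original; safe_static_path and the example paths are illustrative only.

import os

def safe_static_path(static_folder, filename):
    static_folder = os.path.abspath(static_folder)
    candidate = os.path.abspath(os.path.join(static_folder, filename))
    if not candidate.startswith(static_folder + os.sep):
        raise ValueError('invalid static path: %s' % filename)
    return candidate

print(safe_static_path('/srv/app/static', 'css/site.css'))
# safe_static_path('/srv/app/static', '../../etc/passwd') would raise ValueError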
<SCANS> self.application not in routers and \
(self.application != THREAD_LOCAL.routes.default_application or self.application == 'welcome'):
raise HTTP(
400, THREAD_LOCAL.routes.error_message % 'invalid request',
web2py_error="unknown application: '%s'" % self.application)
# set the application router
#
log_rewrite("select application=%s" % self.application)
self.request.application = self.application
if self.application not in routers:
self.router = routers.BASE # support gluon.main.wsgibase init->welcome
else:
self.router = routers[self.application] # application router
self.controllers = self.router.controllers
self.default_controller = self.domain_controller or self.router.default_controller
self.functions = self.router.functions
self.languages = self.router.languages
self.default_language = self.router.default_language
self.map_hyphen = self.router.map_hyphen
self.exclusive_domain = self.router.exclusive_domain
self._acfe_match = self.router._acfe_match
self.file_match = self.router.file_match
self._file_match = self.router._file_match
self._args_match = self.router._args_match
def map_root_static(self):
"""Handles root-static files (no hyphen mapping)"""
"""a root-static file is one whose incoming URL expects it to be at the root,"""
"""typically robots.txt & favicon.ico"""
if len(self.args) == 1 and self.arg0 in self.router.root_static:
self.controller = self.request.controller = 'static'
root_static_file = pjoin(global_settings.applications_parent,
'applications', self.application,
self.controller, self.arg0)
log_rewrite("route: root static=%s" % root_static_file)
return root_static_file, None
return None, None
def map_language(self):
"""Handles language (no hyphen mapping)"""
arg0 = self.arg0 # no hyphen mapping
if arg0 and self.languages and arg0 in self.languages:
self.language = arg0
else:
self.language = self.default_language
if self.language:
log_rewrite("route: language=%s" % self.language)
self.pop_arg_if(self.language == arg0)
arg0 = self.arg0
def map_controller(self):
"""Identifies controller"""
# handle controller
#
arg0 = self.harg0 # map hyphens
if not arg0 or (self.controllers and arg0 not in self.controllers):
self.controller = self.default_controller or ''
else:
self.controller = arg0
<FILEB>
<CHANGES>
Column( "contents", JSONType() ),
<CHANGEE>
<FILEE>
<FILEB>
Column( "library_folder_id", Integer, ForeignKey( "library_folder.id" ), nullable=True, index=True ),
Column( "library_item_info_template_id", Integer, ForeignKey( "library_item_info_template.id" ), index=True ) )
LibraryDatasetInfoTemplateAssociation.table = Table( "library_dataset_info_template_association", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "library_dataset_id", Integer, ForeignKey( "library_dataset.id" ), nullable=True, index=True ),
Column( "library_item_info_template_id", Integer, ForeignKey( "library_item_info_template.id" ), index=True ) )
LibraryDatasetDatasetInfoTemplateAssociation.table = Table( "library_dataset_dataset_info_template_association", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "library_dataset_dataset_association_id", Integer, ForeignKey( "library_dataset_dataset_association.id" ), nullable=True, index=True ),
Column( "library_item_info_template_id", Integer, ForeignKey( "library_item_info_template.id" ), index=True ) )
LibraryItemInfoElement.table = Table( "library_item_info_element", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
<CHANGES>
Column( "contents", TEXT ),
<CHANGEE>
Column( "library_item_info_id", Integer, ForeignKey( "library_item_info.id" ), index=True ),
Column( "library_item_info_template_element_id", Integer, ForeignKey( "library_item_info_template_element.id" ), index=True ) )
LibraryItemInfo.table<SCANS>_info_template_element", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "optional", Boolean, index=True, default=True ),
Column( "deleted", Boolean, index=True, default=False ),
Column( "name", TEXT ),
Column( "description", TEXT ),
Column( "type", TEXT, default='string' ),
Column( "order_id", Integer ),
Column( "options", JSONType() ),
Column( "library_item_info_template_id", Integer, ForeignKey( "library_item_info_template.id" ), index=True ) )
LibraryItemInfoTemplate.table = Table( "library_item_info_template", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "optional", Boolean, index=True, default=True ),
Column( "deleted", Boolean, index=True, default=False ),
Column( "name", TEXT ),
Column( "description", TEXT ),
Column( "item_count", Integer, default=0 ) )
LibraryInfoTemplateAssociation.table = Table( "library_info_template_association", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "library_id", Integer, ForeignKey( "library.id" ), nullable=True, index=True ),
Column( "library_item_info_template_id", Integer, ForeignKey( "library_item_info_template.id" ), index=True ) )
LibraryFolderInfoTemplateAssociation.table = Table( "library_folder_info_template_association", metadata,
Column( "id", Integer, primary_key=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
<FILEB>
<CHANGES>
self.assertContains(response, b"Currently")
<CHANGEE>
<FILEE>
<FILEB>
self.client.logout()
def test_inline_file_upload_edit_validation_error_post(self):
"""Test that inline file uploads correctly display prior data (#10002)."""
post_data = {
"name": "Test Gallery",
"pictures-TOTAL_FORMS": "2",
"pictures-INITIAL_FORMS": "1",
"pictures-MAX_NUM_FORMS": "0",
"pictures-0-id": six.text_type(self.picture.id),
"pictures-0-gallery": six.text_type(self.gallery.id),
"pictures-0-name": "Test Picture",
"pictures-0-image": "",
"pictures-1-id": "",
"pictures-1-gallery": str(self.gallery.id),
"pictures-1-name": "Test Picture 2",
"pictures-1-image": "",
}
response = self.client.post('/test_admin/%s/admin_views/gallery/%d/' % (self.urlbit, self.gallery.id), post_data)
<CHANGES>
self.assertTrue(response._container[0].find("Currently:") > -1)
<CHANGEE>
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class AdminInlineTests(TestCase):
urls = "admin_views.urls"
fixtures = ['admin-views-users.xml']
def setUp(self):
self.post_data = {
"name": "Test Name",
"widget_set-TOTAL_FORMS": "3",
"widget_set-INITIAL_FORMS": "0",
"widget_set-MAX_NUM_FORMS": "0",
"widget_set-0-id": "",
"widget_set-0-owner": "1",
<FILEE>
<SCANS> test_save_as_duplication(self):
"""Ensure save as actually creates a new person"""
post_data = {'_saveasnew': '', 'name': 'John M', 'gender': 1, 'age': 42}
self.client.post('/test_admin/admin/admin_views/person/1/', post_data)
self.assertEqual(len(Person.objects.filter(name='John M')), 1)
self.assertEqual(len(Person.objects.filter(id=1)), 1)
def test_save_as_display(self):
"""Ensure that 'save as' is displayed when activated and after submitting"""
"""invalid data aside save_as_new will not show us a form to overwrite the"""
"""initial model."""
response = self.client.get('/test_admin/admin/admin_views/person/1/')
self.assertTrue(response.context['save_as'])
post_data = {'_saveasnew': '', 'name': 'John M', 'gender': 3, 'alive': 'checked'}
response = self.client.post('/test_admin/admin/admin_views/person/1/', post_data)
self.assertEqual(response.context['form_url'], '/test_admin/admin/admin_views/person/add/')
class CustomModelAdminTest(AdminViewBasicTestCase):
urls = "admin_views.urls"
urlbit = "admin2"
def testCustomAdminSiteLoginForm(self):
self.client.logout()
response = self.client.get('/test_admin/admin2/')
self.assertIsInstance(response, TemplateResponse)
self.assertEqual(response.status_code, 200)
login = self.client.post('/test_admin/admin2/', {
REDIRECT_FIELD_NAME: '/test_admin/admin2/',
LOGIN_FORM_KEY: 1,
'username': 'customform',
'password': 'secret',
})
self.assertIsInstance(login, TemplateResponse)
self.assertEqual(login.status_code, 200)
self.assertContains(login, 'custom form error')
def testCustomAdminSiteLoginTemplate(self):
self.client.logout()
response = self.client.get('/test_admin/admin2/')
self.assertIsInstance(response, TemplateResponse)
self.assertTemplateUsed(response, 'custom_admin/login.html')
self.assertContains(response, 'Hello from a custom login template')
def testCustomAdminSiteLogoutTemplate(self):
response = self.client.get('/test_admin/admin2/logout/')
self.assertIs
<FILEB>
<CHANGES>
db = db_connect()
<CHANGEE>
<FILEE>
<FILEB>
""""message": - operation result description"""
""""match_id": - match_id of match_report"""
"""}"""
try:
match_id = None
if type(data).__name__ == 'str':
data = parse_stats_submission( data )
if is_instagib(data):
data["game_meta"]["G"] = "i" + data["game_meta"]["G"]
if is_tdm2v2(data):
data["game_meta"]["G"] = "tdm2v2"
match_id = data["game_meta"]["I"]
if data["game_meta"]["G"] not in GAMETYPE_IDS:
return {
"ok": False,
"message": "gametype is not accepted: " + data["game_meta"]["G"],
"match_id": match_id
}
<CHANGES>
db = db.connect()
<CHANGEE>
cu = db.cursor()
team_scores = [None, None]
team_index = -1
for team_data in data["teams"]:
team_index = int( team_data["Q"].replace("team#", "") ) - 1
for key in ["scoreboard-rounds", "scoreboard-caps", "scoreboard-score"]:
if key in team_data:
team_scores[team_index] = int(team_data[key])
team1_score, team2_score = team_scores
match_timestamp = int( data["game_meta"]["1"] )
cu.execute("INSERT INTO matches (match_id, gametype_id, factory_id, map_id, timestamp, duration, team1_score, team2_score, post_processed) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)", [
match_id,
<FILEE>
<SCANS>
"message": type(e).__name__ + ": " + str(e)
}
cu.close()
db.close()
return result
def reset_gametype_ratings( gametype ):
"""Resets ratings for gametype"""
if gametype not in GAMETYPE_IDS:
print("gametype is not accepted: " + gametype)
return False
gametype_id = GAMETYPE_IDS[gametype]
result = False
try:
db = db_connect()
cu = db.cursor()
cw = db.cursor()
cw.execute('UPDATE matches SET post_processed = FALSE WHERE gametype_id = %s', [gametype_id])
cw.execute('UPDATE gametype_ratings SET mean = %s, deviation = %s, n = 0 WHERE gametype_id = %s', [trueskill.MU, trueskill.SIGMA, gametype_id])
scoreboard_query = '''SELECT'''
'''s.match_id,'''
'''MIN(m.team1_score) AS team1_score,'''
'''MIN(m.team2_score) AS team2_score,'''
'''array_agg(json_build_object('''
''''P', s.steam_id,'''
''''t', s.team,'''
''''alivetime', s.alive_time,'''
''''scoreboard-score', s.score,'''
''''scoreboard-pushes', s.damage_dealt,'''
''''scoreboard-destroyed', s.damage_taken,'''
''''scoreboard-kills', s.frags,'''
''''scoreboard-deaths', s.deaths,'''
''''medal-captures', mm.medals->'captures','''
''''medal-defends', mm.medals->'defends','''
''''medal-assists', mm.medals->'assists''''
'''))'''
'''FROM'''
'''scoreboards s'''
'''LEFT JOIN matches m ON m.match_id = s.match_id'''
'''LEFT JOIN ('''
'''SELECT'''
'''sm.steam_id, sm.team, sm.match_id,'''
'''json_object_agg(mm.medal_short, sm.count) as medals'''
'''FROM'''
'''scoreboards_medals sm'''
'''LEFT JOIN'''
'''medals mm ON mm.medal_id = sm.medal_id'''
'''GROUP BY sm.steam_id, sm.team, sm.match_id'''
''') mm ON mm.
<FILEB>
<CHANGES>
X509Extension(b('basicConstraints'), True, b('CA:false'))])
<CHANGEE>
<FILEE>
<FILEB>
request = X509Req()
subject = request.get_subject()
self.assertTrue(
isinstance(subject, X509NameType),
"%r is of type %r, should be %r" % (subject, type(subject), X509NameType))
subject.commonName = "foo"
self.assertEqual(request.get_subject().commonName, "foo")
del request
subject.commonName = "bar"
self.assertEqual(subject.commonName, "bar")
def test_get_subject_wrong_args(self):
request = X509Req()
self.assertRaises(TypeError, request.get_subject, None)
def test_add_extensions(self):
"""L{X509Req.add_extensions} accepts a C{list} of L{X509Extension}"""
"""instances and adds them to the X509 request."""
request = X509Req()
request.add_extensions([
<CHANGES>
X509Extension('basicConstraints', True, 'CA:false')])
<CHANGEE>
# XXX Add get_extensions so the rest of this unit test can be written.
def test_add_extensions_wrong_args(self):
"""L{X509Req.add_extensions} raises L{TypeError} if called with the wrong"""
"""number of arguments or with a non-C{list}. Or it raises L{ValueError}"""
"""if called with a C{list} containing objects other than L{X509Extension}"""
"""instances."""
request = X509Req()
self.assertRaises(TypeError, request.add_extensions)
self.assertRaises(TypeError, request.add_extensions, object())
self.assertRaises(ValueError, request.add_extensions, [object()])
self.assertRaises(TypeError, request.add_extensions, [], None)
class X509Tests(TestCase, _PKeyInteractionTestsMixin):
<FILEE>
<SCANS>sCT/R+6vsKAAxNTcBjUeZjlykCJWy5ojShGftXIKY"""
"""w/njVbKMXrvc83qmTdGl3TAM0fxQIpqgcglFLveEBgzn"""
"""-----END CERTIFICATE-----""")
cleartextPrivateKeyPEM = b("""-----BEGIN RSA PRIVATE KEY-----"""
"""MIICXQIBAAKBgQD5mkLpi7q6ROdu7khB3S9aanA0Zls7vvfGOmB80/yeylhGpsjA"""
"""jWen0VtSQke/NlEPGtO38tsV7CsuFnSmschvAnGrcJl76b0UOOHUgDTIoRxC6QDU"""
"""3claegwsrBA+sJEBbqx5RdXbIRGicPG/8qQ4Zm1SKOgotcbwiaor2yxZ2wIDAQAB"""
"""AoGBAPCgMpmLxzwDaUmcFbTJUvlLW1hoxNNYSu2jIZm1k/hRAcE60JYwvBkgz3UB"""
"""yMEh0AtLxYe0bFk6EHah11tMUPgscbCq73snJ++8koUw+csk22G65hOs51bVb7Aa"""
"""6JBe67oLzdtvgCUFAA2qfrKzWRZzAdhUirQUZgySZk+Xq1pBAkEA/kZG0A6roTSM"""
"""BVnx7LnPfsycKUsTumorpXiylZJjTi9XtmzxhrYN6wgZlDOOwOLgSQhszGpxVoMD"""
"""u3gByT1b2QJBAPtL3mSKdvwRu/+40zaZLwvSJRxaj0mcE4BJOS6Oqs/hS1xRlrNk"""
"""PpQ7WJ4yM6ZOLnXzm2mKyxm50Mv64109FtMCQQDOqS2KkjHaLowTGVxwC0DijMfr"""
"""I9Lf8sSQk32J5VWCySWf5gGTfEnpmUa41gKTMJIbqZZLucNuDcOtzUaeWZlZAkA8"""
"""ttXigLnCqR486JDPTi9ZscoZkZ+w7y6e/hH8t6d5Vjt48
<FILEB>
<CHANGES>
if curEp in (MULTI_EP_RESULT, SEASON_RESULT):
<CHANGEE>
<FILEE>
<FILEB>
notNeededEps.append(epNum)
else:
neededEps.append(epNum)
else:
neededEps.append(epNum)
logger.log("Result is neededEps: "+str(neededEps)+", notNeededEps: "+str(notNeededEps), logger.DEBUG)
if not neededEps:
logger.log("All of these episodes were covered by single nzbs, ignoring this multi-ep result", logger.DEBUG)
continue
# don't bother with the single result if we're going to get it with a multi result
for epObj in multiResult.episodes:
epNum = epObj.episode
if epNum in foundResults:
logger.log("A needed multi-episode result overlaps with episode "+str(epNum)+", removing its results from the list", logger.DEBUG)
del foundResults[epNum]
finalResults.append(multiResult)
# of all the single ep results narrow it down to the best one for each episode
for curEp in foundResults:
<CHANGES>
if curEp == MULTI_EP_RESULT:
<CHANGEE>
continue
if len(foundResults[curEp]) == 0:
continue
finalResults.append(pickBestResult(foundResults[curEp]))
return finalResults
<FILEE>
<SCANS> continue
try:
curFoundResults = curProvider.findEpisode(episode, manualSearch=manualSearch)
except exceptions.AuthException, e:
logger.log("Authentication error: "+str(e), logger.ERROR)
continue
except Exception, e:
logger.log("Error while searching "+curProvider.providerName+", skipping: "+str(e), logger.ERROR)
logger.log(traceback.format_exc(), logger.DEBUG)
continue
didSearch = True
# skip non-tv crap
curFoundResults = filter(lambda x: all([y not in x.extraInfo[0].lower() for y in resultFilters]), curFoundResults)
foundResults += curFoundResults
if not didSearch:
logger.log("No providers were used for the search - check your settings and ensure that either NZB/Torrents is selected and at least one NZB provider is being used.", logger.ERROR)
bestResult = pickBestResult(foundResults)
return bestResult
def findSeason(show, season):
logger.log("Searching for stuff we need from "+show.name+" season "+str(season))
foundResults = {}
didSearch = False
for curProvider in providers.getAllModules():
if not curProvider.isActive():
continue
try:
curResults = curProvider.findSeasonResults(show, season)
# make a list of all the results for this provider
for curEp in curResults:
# skip non-tv crap
curResults[curEp] = filter(lambda x: all([y not in x.extraInfo[0].lower() for y in resultFilters]), curResults[curEp])
if curEp in foundResults:
foundResults[curEp] += curResults[curEp]
else:
foundResults[curEp] = curResults[curEp]
except exceptions.AuthException, e:
logger.log("Authentication error: "+str(e), logger.ERROR)
continue
except Exception, e:
logger.log("Error while searching "+curProvider.providerName+", skipping: "+str(e), logger.ERROR)
logger.log(traceback.format_exc(), logger.DEBUG)
continue
didSearch = True
if not didSearch:
logger.log("No providers were used for the search - check your settings and ensure that either NZB/Torrents is selected and at least one NZB provider is being used.", logger.ERROR)
finalResults = []
# pick the best season NZB
bestSeasonNZB = None
if SEASON_RESULT in foundResults:
bestSeasonNZB = pickBestResult(foundResults[SEASON_RESULT])
# see if every episode is wanted
if bestSeasonNZB:
# get the quality of the season nzb
seasonQual = Quality.nameQuality(bestSeasonNZB.extraInfo[0])
logger.log("The quality of the season NZB is "+Quality.qualityStrings[seasonQual], logger.DEBUG)
my
<FILEB>
<CHANGES>
@material(blockid=68, data=[2, 3, 4, 5], transparent=True)
<CHANGEE>
<FILEE>
<FILEB>
# but since ladders can apparently be placed on transparent blocks, we
# have to render this thing anyway. same for data == 2
tex = transform_image_side(raw_texture)
composite.alpha_over(img, tex, (0,6), tex)
return generate_texture_tuple(img, blockID)
if data == 2:
tex = transform_image_side(raw_texture).transpose(Image.FLIP_LEFT_RIGHT)
composite.alpha_over(img, tex, (12,6), tex)
return generate_texture_tuple(img, blockID)
if data == 3:
tex = transform_image_side(raw_texture).transpose(Image.FLIP_LEFT_RIGHT)
composite.alpha_over(img, tex, (0,0), tex)
return generate_texture_tuple(img, blockID)
if data == 4:
tex = transform_image_side(raw_texture)
composite.alpha_over(img, tex, (12,0), tex)
return generate_texture_tuple(img, blockID)
# wall signs
<CHANGES>
@material(blockid=68, data=[2, 3, 4, 5], trasnparent=True)
<CHANGEE>
def wall_sign(blockid, data, north): # wall sign
# first north rotations
if north == 'upper-left':
if data == 2: data = 5
elif data == 3: data = 4
elif data == 4: data = 2
elif data == 5: data = 3
elif north == 'upper-right':
if data == 2: data = 3
elif data == 3: data = 2
elif data == 4: data = 5
elif data == 5: data = 4
<FILEE>
<SCANS>, (0,4), v_stick)
composite.alpha_over(img, img2, (0,0), img2)
elif data & 0x07 == 0x2: # east
img = build_full_block(side_t ,None ,None ,side_t.rotate(90), None)
temp = transform_image_side(back_t).transpose(Image.FLIP_LEFT_RIGHT)
composite.alpha_over(img, temp, (2,2), temp)
composite.alpha_over(img, h_stick, (6,3), h_stick)
elif data & 0x07 == 0x3: # west
img = Image.new("RGBA", (24,24), bgcolor)
img2 = build_full_block(side_t.rotate(180) ,None ,None ,side_t.rotate(270), piston_t)
composite.alpha_over(img, h_stick, (0,0), h_stick)
composite.alpha_over(img, img2, (0,0), img2)
elif data & 0x07 == 0x4: # north
img = build_full_block(side_t.rotate(90) ,None ,None , piston_t, side_t.rotate(270))
composite.alpha_over(img, h_stick.transpose(Image.FLIP_LEFT_RIGHT), (0,0), h_stick.transpose(Image.FLIP_LEFT_RIGHT))
elif data & 0x07 == 0x5: # south
img = Image.new("RGBA", (24,24), bgcolor)
img2 = build_full_block(side_t.rotate(270) ,None ,None ,None, side_t.rotate(90))
temp = transform_image_side(back_t)
composite.alpha_over(img2, temp, (10,2), temp)
composite.alpha_over(img, img2, (0,0), img2)
composite.alpha_over(img, h_stick.transpose(Image.FLIP_LEFT_RIGHT), (-3,2), h_stick.transpose(Image.FLIP_LEFT_RIGHT))
return img
# cobweb
sprite(blockid=30, index=11)
@material(blockid=31, data=range(3), transparent=True)
def tall_grass(blockid, data):
if data == 0: # dead shrub
texture = terrain_images
<FILEB>
<CHANGES>
self.assertContains(response, b"Currently")
<CHANGEE>
<FILEE>
<FILEB>
self.client.logout()
def test_inline_file_upload_edit_validation_error_post(self):
"""Test that inline file uploads correctly display prior data (#10002)."""
post_data = {
"name": "Test Gallery",
"pictures-TOTAL_FORMS": "2",
"pictures-INITIAL_FORMS": "1",
"pictures-MAX_NUM_FORMS": "0",
"pictures-0-id": six.text_type(self.picture.id),
"pictures-0-gallery": six.text_type(self.gallery.id),
"pictures-0-name": "Test Picture",
"pictures-0-image": "",
"pictures-1-id": "",
"pictures-1-gallery": str(self.gallery.id),
"pictures-1-name": "Test Picture 2",
"pictures-1-image": "",
}
response = self.client.post('/test_admin/%s/admin_views/gallery/%d/' % (self.urlbit, self.gallery.id), post_data)
<CHANGES>
self.assertTrue(response._container[0].find("Currently:") > -1)
<CHANGEE>
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class AdminInlineTests(TestCase):
urls = "admin_views.urls"
fixtures = ['admin-views-users.xml']
def setUp(self):
self.post_data = {
"name": "Test Name",
"widget_set-TOTAL_FORMS": "3",
"widget_set-INITIAL_FORMS": "0",
"widget_set-MAX_NUM_FORMS": "0",
"widget_set-0-id": "",
"widget_set-0-owner": "1",
<FILEE>
<SCANS>ChangeDone(self):
"Check the never-cache status of the password change done view"
response = self.client.get('/test_admin/admin/password_change/done/')
self.assertEqual(get_max_age(response), None)
def testJsi18n(self):
"Check the never-cache status of the JavaScript i18n view"
response = self.client.get('/test_admin/admin/jsi18n/')
self.assertEqual(get_max_age(response), None)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class PrePopulatedTest(TestCase):
urls = "admin_views.urls"
fixtures = ['admin-views-users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_prepopulated_on(self):
response = self.client.get('/test_admin/admin/admin_views/prepopulatedpost/add/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "id: '#id_slug',")
self.assertContains(response, "field['dependency_ids'].push('#id_title');")
self.assertContains(response, "id: '#id_prepopulatedsubpost_set-0-subslug',")
def test_prepopulated_off(self):
response = self.client.get('/test_admin/admin/admin_views/prepopulatedpost/1/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "A Long Title")
self.assertNotContains(response, "id: '#id_slug'")
self.assertNotContains(response, "field['dependency_ids'].push('#id_title');")
self.assertNotContains(response, "id: '#id_prepopulatedsubpost_set-0-subslug',")
@override_settings(USE_THOUSAND_SEPARATOR=True, USE_L10N=True)
def test_prepopulated_maxlength_localized(self):
"""Regression test for #15938: if USE_THOUSAND_SEPARATOR is set, make sure"""
"""that maxLength (in the JavaScript) is rendered without separators."""
response = self.client.get('/test_admin/admin/admin_views/prepopulatedpostlargeslug/add/')
self.assertContains(response, "maxLength: 1000") # instead of 1,0
<FILEB>
<CHANGES>
localExtList = extList - extBlu - extDir
<CHANGEE>
<FILEE>
<FILEB>
del pathreal
del pathislink
del pathsplitext
# We don't want any folders
return [], filelist
def __createDirList(self, path):
subdirlist, filelist = [], []
dvdStruct = None
pathname, ext = "", ""
# Improve performance and avoid dots
movie_trashpath = config.EMC.movie_trashcan_enable.value and os.path.realpath(config.EMC.movie_trashcan_path.value)
check_dvdstruct = config.EMC.check_dvdstruct.value \
and not (config.EMC.cfgscan_suppress.value and path in self.nostructscan)
check_moviestruct = config.EMC.check_moviestruct.value \
and not (config.EMC.cfgscan_suppress.value and path in self.nostructscan)
check_blustruct = config.EMC.check_blustruct.value \
and not (config.EMC.cfgscan_suppress.value and path in self.nostructscan)
hideitemlist = config.EMC.cfghide_enable.value and self.hideitemlist
<CHANGES>
localExtList = extList - extBlu
<CHANGEE>
dappend = subdirlist.append
fappend = filelist.append
splitext = os.path.splitext
pathjoin = os.path.join
OSErrorMessage = _("OSError - please reload list (EMC menu)")
if os.path.exists(path):
# Get directory listing
walk_dirs = []
walk_files = []
try:
walk_listdir = os.listdir(path)
except OSError:
<FILEE>
<SCANS> newPiconRenderer:
from Components.Renderer.Picon import getPiconName
try:
from enigma import BT_SCALE, BT_KEEP_ASPECT_RATIO
except ImportError as ie:
newPiconRenderer = False
BT_SCALE = None
BT_KEEP_ASPECT_RATIO = None
global extAudio, extDvd, extVideo, extPlaylist, extList, extMedia, extBlu
global cmtDir, cmtUp, cmtTrash, cmtLRec, cmtVLC, cmtBME2, cmtBMEMC, virVLC, virAll, virToE, virToD
global vlcSrv, vlcDir, vlcFil
global plyDVB, plyM2TS, plyDVD, plyMP3, plyVLC, plyAll
global sidDVB, sidDVD, sidMP3
# Set definitions
# Media types
extAudio = frozenset([".ac3", ".dts", ".flac", ".m4a", ".mp2", ".mp3", ".ogg", ".wav", ".wma", ".aac"])
extVideo = frozenset([".ts", ".trp", ".avi", ".divx", ".f4v", ".flv", ".img", ".ifo", ".iso", ".m2ts", ".m4v", ".mkv", ".mov", ".mp4", ".mpeg", ".mpg", ".mts", ".vob", ".wmv", ".bdmv", ".asf", ".stream", ".webm"])
extPlaylist = frozenset([".m3u", ".e2pls"])#, ".pls"])
extMedia = extAudio | extVideo | extPlaylist
extDir = frozenset([""])
extList = extMedia | extDir
# Additional file types
extTS = frozenset([".ts", ".trp"])
extM2ts = frozenset([".m2ts"])
#extDvd = frozenset([".iso", ".img", ".ifo"])
extIfo = frozenset([".ifo"])
extIso = frozenset([".iso", ".img"])
extDvd = extIfo | extIso
extVLC = frozenset([vlcFil])
extBlu = frozenset([".bdmv"])
# blue disk movie
# mimetype("video/x-bluray") ext (".bdmv")
# Player types
plyDVB = extTS # ServiceDVB
plyM2TS = extM2ts # ServiceM2TS
plyDVD = extDvd # ServiceDVD
plyMP3 = extMedia - plyDVB - plyM2TS -
<FILEB>
<CHANGES>
parameters['additional_owners'] = ','.join(map(str,additional_owners))
<CHANGEE>
<FILEE>
<FILEB>
"""File-like object to upload."""
"""additional_owners: additional Twitter users that are allowed to use"""
"""The uploaded media. Should be a list of integers. Maximum"""
"""number of additional owners is capped at 100 by Twitter."""
"""media_category:"""
"""Category with which to identify media upload. Only use with Ads"""
"""API & video files."""
"""Returns:"""
"""tuple: media_id (returned from Twitter), file-handler object (i.e., has .read()"""
"""method), filename media file."""
url = '%s/media/upload.json' % self.upload_url
media_fp, filename, file_size, media_type = parse_media_file(media, async_upload=True)
if not all([media_fp, filename, file_size, media_type]):
raise TwitterError({'message': 'Could not process media file'})
parameters = {}
if additional_owners and len(additional_owners) > 100:
raise TwitterError({'message': 'Maximum of 100 additional owners may be specified for a Media object'})
if additional_owners:
<CHANGES>
parameters['additional_owners'] = additional_owners
<CHANGEE>
if media_category:
parameters['media_category'] = media_category
# INIT doesn't read in any data. Its purpose is to prepare Twitter to
# receive the content in APPEND requests.
parameters['command'] = 'INIT'
parameters['media_type'] = media_type
parameters['total_bytes'] = file_size
resp = self._RequestUrl(url, 'POST', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
try:
media_id = data['media_id']
except KeyError:
<FILEE>
<SCANS>."""
"""slug (str, optional):"""
"""You can identify a list by its slug instead of its numerical id."""
"""If you decide to do so, note that you'll also have to specify the"""
"""list owner using the owner_id or owner_screen_name parameters."""
"""user_id (int, optional):"""
"""The user_id or a list of user_id's to add to the list."""
"""If not given, then screen_name is required."""
"""screen_name (str, optional):"""
"""The screen_name or a list of screen_name's to add to the list."""
"""If not given, then user_id is required."""
"""owner_screen_name (str, optional):"""
"""The screen_name of the user who owns the list being requested by"""
"""a slug."""
"""owner_id (int, optional):"""
"""The user ID of the user who owns the list being requested by"""
"""a slug."""
"""Returns:"""
"""twitter.list.List: A twitter.List instance representing the list"""
"""subscribed to."""
is_list = False
parameters = {}
parameters.update(self._IDList(list_id=list_id,
slug=slug,
owner_id=owner_id,
owner_screen_name=owner_screen_name))
if user_id:
if isinstance(user_id, list) or isinstance(user_id, tuple):
is_list = True
uids = [str(enf_type('user_id', int, uid)) for uid in user_id]
parameters['user_id'] = ','.join(uids)
else:
parameters['user_id'] = enf_type('user_id', int, user_id)
elif screen_name:
if isinstance(screen_name, list) or isinstance(screen_name, tuple):
is_list = True
parameters['screen_name'] = ','.join(screen_name)
else:
parameters['screen_name'] = screen_name
if is_list:
url = '%s/lists/members/create_all.json' % self.base_url
else:
url = '%s/lists/members/create.json' % self.base_url
resp = self._RequestUrl(url, 'POST', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return List.NewFromJsonDict(data)
def DestroyListsMember(self,
list_id=None,
slug=None,
owner_screen_name=None,
owner_id=None,
user_id=None,
screen_name=None):
"""Destroys the subscription
<FILEB>
<CHANGES>
method = luigi.EnumParameter(enum=Method, default=Method.shear)
<CHANGEE>
<FILEE>
<FILEB>
'R10m': 700,
'R20m': 350,
'R60m': 120}
return buf[group]
@luigi.Task.event_handler(luigi.Event.FAILURE)
def on_failure(task, exception):
"""Capture any Task Failure here."""
ERROR_LOGGER.error(task=task.get_task_family(),
params=task.to_str_params(),
scene=task.level1,
exception=exception.__str__(),
traceback=traceback.format_exc().splitlines())
class DataStandardisation(luigi.Task):
"""Runs the standardised product workflow."""
level1 = luigi.Parameter()
outdir = luigi.Parameter()
model = luigi.EnumParameter(enum=Model)
vertices = luigi.TupleParameter(default=(5, 5))
<CHANGES>
method = luigi.Parameter(default='shear')
<CHANGEE>
pixel_quality = luigi.BoolParameter()
land_sea_path = luigi.Parameter()
aerosol_fname = luigi.Parameter(significant=False)
brdf_path = luigi.Parameter(significant=False)
brdf_premodis_path = luigi.Parameter(significant=False)
ozone_path = luigi.Parameter(significant=False)
water_vapour_path = luigi.Parameter(significant=False)
dem_path = luigi.Parameter(significant=False)
ecmwf_path = luigi.Parameter(significant=False)
invariant_height_fname = luigi.Parameter(significant=False)
dsm_fname = luigi.Parameter(significant=False)
modtran_exe = luigi.Parameter(significant=False)
<FILEE>
<SCANS>_fname, self.brdf_path, self.brdf_premodis_path,
self.ozone_path, self.water_vapour_path, self.dem_path,
self.dsm_fname, self.invariant_height_fname,
self.modtran_exe, out_fname, ecmwf_path, self.rori,
self.compression, self.acq_parser_hint)
class ARD(luigi.WrapperTask):
"""Kicks off ARD tasks for each level1 entry."""
level1_list = luigi.Parameter()
outdir = luigi.Parameter()
model = luigi.EnumParameter(enum=Model)
vertices = luigi.TupleParameter(default=(5, 5))
method = luigi.EnumParameter(enum=Method, default=Method.shear)
pixel_quality = luigi.BoolParameter()
land_sea_path = luigi.Parameter()
aerosol_fname = luigi.Parameter(significant=False)
brdf_path = luigi.Parameter(significant=False)
brdf_premodis_path = luigi.Parameter(significant=False)
ozone_path = luigi.Parameter(significant=False)
water_vapour_path = luigi.Parameter(significant=False)
dem_path = luigi.Parameter(significant=False)
ecmwf_path = luigi.Parameter(significant=False)
invariant_height_fname = luigi.Parameter(significant=False)
dsm_fname = luigi.Parameter(significant=False)
modtran_exe = luigi.Parameter(significant=False)
tle_path = luigi.Parameter(significant=False)
rori = luigi.FloatParameter(default=0.52, significant=False)
compression = luigi.Parameter(default='lzf', significant=False)
def requires(self):
with open(self.level1_list) as src:
level1_scenes = [scene.strip() for scene in src.readlines()]
for scene in level1_scenes:
kwargs = {'level1': scene,
'model': self.model,
'vertices': self.vertices,
'pixel_quality': self.pixel_quality,
'method': self.method,
'modtran_exe': self.modtran_exe,
'outdir': self.outdir,
'land_sea_path': self.land_sea_path,
'aerosol_fname': self.aerosol_fname,
'brdf_path': self.brdf_path,
'brdf_premodis_path':
<FILEB>
<CHANGES>
self.assertContains(response, b"Currently")
<CHANGEE>
<FILEE>
<FILEB>
self.client.logout()
def test_inline_file_upload_edit_validation_error_post(self):
"""Test that inline file uploads correctly display prior data (#10002)."""
post_data = {
"name": "Test Gallery",
"pictures-TOTAL_FORMS": "2",
"pictures-INITIAL_FORMS": "1",
"pictures-MAX_NUM_FORMS": "0",
"pictures-0-id": six.text_type(self.picture.id),
"pictures-0-gallery": six.text_type(self.gallery.id),
"pictures-0-name": "Test Picture",
"pictures-0-image": "",
"pictures-1-id": "",
"pictures-1-gallery": str(self.gallery.id),
"pictures-1-name": "Test Picture 2",
"pictures-1-image": "",
}
response = self.client.post('/test_admin/%s/admin_views/gallery/%d/' % (self.urlbit, self.gallery.id), post_data)
<CHANGES>
self.assertTrue(response._container[0].find("Currently:") > -1)
<CHANGEE>
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class AdminInlineTests(TestCase):
urls = "admin_views.urls"
fixtures = ['admin-views-users.xml']
def setUp(self):
self.post_data = {
"name": "Test Name",
"widget_set-TOTAL_FORMS": "3",
"widget_set-INITIAL_FORMS": "0",
"widget_set-MAX_NUM_FORMS": "0",
"widget_set-0-id": "",
"widget_set-0-owner": "1",
<FILEE>
<SCANS>HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class AdminJavaScriptTest(TestCase):
fixtures = ['admin-views-users.xml']
urls = "admin_views.urls"
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def test_js_minified_only_if_debug_is_false(self):
"""Ensure that the minified versions of the JS files are only used when"""
"""DEBUG is False."""
"""Refs #17521."""
with override_settings(DEBUG=False):
response = self.client.get(
'/test_admin/%s/admin_views/section/add/' % 'admin')
self.assertNotContains(response, 'jquery.js')
self.assertContains(response, 'jquery.min.js')
self.assertNotContains(response, 'prepopulate.js')
self.assertContains(response, 'prepopulate.min.js')
self.assertNotContains(response, 'actions.js')
self.assertContains(response, 'actions.min.js')
self.assertNotContains(response, 'collapse.js')
self.assertContains(response, 'collapse.min.js')
self.assertNotContains(response, 'inlines.js')
self.assertContains(response, 'inlines.min.js')
with override_settings(DEBUG=True):
response = self.client.get(
'/test_admin/%s/admin_views/section/add/' % 'admin')
self.assertContains(response, 'jquery.js')
self.assertNotContains(response, 'jquery.min.js')
self.assertContains(response, 'prepopulate.js')
self.assertNotContains(response, 'prepopulate.min.js')
self.assertContains(response, 'actions.js')
self.assertNotContains(response, 'actions.min.js')
self.assertContains(response, 'collapse.js')
self.assertNotContains(response, 'collapse.min.js')
self.assertContains(response, 'inlines.js')
self.assertNotContains(response, 'inlines.min.js')
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class SaveAsTests(TestCase):
urls = "admin_views.urls"
fixtures = ['admin-views-users.xml', 'admin-views-person.xml']
def setUp(self):
self.client.login(username='super', password='secret')
def tearDown(self):
self.client.logout()
def
<FILEB>
<CHANGES>
bias_file="BIAS", label="metad",**kwargs):
<CHANGEE>
<FILEE>
<FILEB>
label=label,
pace=pace)+" \\n\\")
return ''.join(output)
def render_metad_bias_print(arg="tic0",stride=1000,label="metad",file="BIAS"):
""":param arg: tic name"""
""":param stride: stride for printing"""
""":param label: label for printing"""
""":param file:"""
""":return:"""
output=[]
arg=','.join([arg,label + ".bias"])
output.append(plumed_print_template.render(arg=arg,
stride=stride,
file=file))
return ''.join(output)
def render_tica_plumed_file(tica_mdl, df, grid_list=[None,None],interval_list=[None,None],
pace=1000, height=1.0, biasfactor=50,
temp=300, sigma=0.2, stride=1000, hills_file="HILLS",
<CHANGES>
bias_file="BIAS", label="metad"):
<CHANGEE>
"""Renders a tica plumed dictionary file that can be directly fed in openmm"""
""":param tica_mdl: project's ticamd"""
""":param df: data frame"""
""":param grid_list: list of min and max vals for grid"""
""":param interval_list: list of min and max vals for interval"""
""":param pace: gaussian drop rate"""
""":param biasfactor: gaussian attenuation rate"""
""":param temp: simulation temp"""
""":param sigma: sigma"""
""":param stride: bias file stride"""
""":param hills_file: hills file"""
""":param bias_file: bias file"""
<FILEE>
<SCANS>Contact":
func = np.repeat(None, len(inds))
elif df.featurizer[0] == "LandMarkFeaturizer":
func = np.repeat("exp", len(inds))
else:
func = df.otherinfo[inds]
feat_labels=['_'.join(map(str,i)) for i in df.resids[inds]]
feature_labels = [template.render(func=i,feature_group=j,feature_index=k) \
for i,j,k in zip(func[inds],df.featuregroup[inds],feat_labels)]
tic_coefficient = tica_mdl.components_[tic_index,]
if tica_mdl.kinetic_mapping:
tic_coefficient *= tica_mdl.eigenvalues_[tic_index]
arg=','.join(feature_labels)
tic_coefficient = ','.join(map(str,tic_coefficient))
output.append(plumed_combine_template.render(arg=arg,
coefficients=tic_coefficient,
label="tic%d"%tic_index,
periodic="NO") +" \\n\\")
return ''.join(output)
def render_metad_code(arg="tic0", sigma=0.2, height=1.0, hills="HILLS",biasfactor=40,
temp=300,interval=None, grid=None,
label="metad",pace=1000,**kwargs):
output=[]
if interval is None or grid is None:
plumed_script = plumed_plain_metad_template
output.append(plumed_script.render(arg=arg,
sigma=sigma,
height=height,
hills=hills,
temp=temp,
pace=pace)+" \\n\\")
else:
plumed_script = plumed_metad_template
grid_min=grid[0]
grid_max=grid[1]
output.append(plumed_script.render(arg=arg,
sigma=sigma,
height=height,
hills=hills,
biasfactor=biasfactor,
interval=','.join(map(str,interval)),
grid_min=grid_min,
grid_max=grid_max,
""":param label: metad label"""
""":return:"""
"""dictionary keyed on tica indices"""
return_dict = {}
inds = np.arange(tica_mdl.n_features)
raw_feats = render_raw_features(df,inds)
mean_feats = render_mean_free_features(df,inds,tica_mdl)
for i in range(tica
<FILEB>
<CHANGES>
clone._select = [SQL('1')]
<CHANGEE>
<FILEE>
<FILEB>
return query
def aggregate(self, aggregation=None, convert=True):
return self._aggregate(aggregation).scalar(convert=convert)
def count(self):
if self._distinct or self._group_by:
return self.wrapped_count()
# defaults to a count() of the primary key
return self.aggregate(convert=False) or 0
def wrapped_count(self, clear_limit=True):
clone = self.order_by()
if clear_limit:
clone._limit = clone._offset = None
sql, params = clone.sql()
wrapped = 'SELECT COUNT(1) FROM (%s) AS wrapped_select' % sql
rq = self.model_class.raw(wrapped, *params)
return rq.scalar() or 0
def exists(self):
clone = self.paginate(1, 1)
<CHANGES>
clone._select = [self.model_class._meta.primary_key]
<CHANGEE>
return bool(clone.scalar())
def get(self):
clone = self.paginate(1, 1)
try:
return clone.execute().next()
except StopIteration:
raise self.model_class.DoesNotExist(
'Instance matching query does not exist:\nSQL: %s\nPARAMS: %s'
% self.sql())
def first(self):
res = self.execute()
res.fill_cache(1)
<FILEE>
<SCANS> query._alias = self._alias
return query
def _model_shorthand(self, args):
accum = []
for arg in args:
if isinstance(arg, Node):
accum.append(arg)
elif isinstance(arg, Query):
accum.append(arg)
elif isinstance(arg, ModelAlias):
accum.extend(arg.get_proxy_fields())
elif isclass(arg) and issubclass(arg, Model):
accum.extend(arg._meta.get_fields())
return accum
def compound_op(operator):
def inner(self, other):
supported_ops = self.model_class._meta.database.compound_operations
if operator not in supported_ops:
raise ValueError(
'Your database does not support %s' % operator)
return CompoundSelect(self.model_class, self, operator, other)
return inner
__or__ = compound_op('UNION')
__and__ = compound_op('INTERSECT')
__sub__ = compound_op('EXCEPT')
def __xor__(self, rhs):
# Symmetric difference, should just be (self | rhs) - (self & rhs)...
wrapped_rhs = self.model_class.select(SQL('*')).from_(
EnclosedClause((self & rhs)).alias('_')).order_by()
return (self | rhs) - wrapped_rhs
def __select(self, *selection):
self._explicit_selection = len(selection) > 0
selection = selection or self.model_class._meta.get_fields()
self._select = self._model_shorthand(selection)
select = returns_clone(__select)
@returns_clone
def from_(self, *args):
self._from = None
if args:
self._from = list(args)
@returns_clone
def group_by(self, *args):
self._group_by = self._model_shorthand(args)
@returns_clone
def having(self, *expressions):
self._having = self._add_query_clauses(self._having, expressions)
@returns_clone
def order_by(self, *args):
self._order_by = list(args)
@returns_clone
def limit(self, lim):
self._limit = lim
@returns_clone
def offset(self, off):
self._offset = off
@returns_clone
def paginate(self, page, paginate_by=20):
if page > 0:
page -= 1
self._limit = paginate_by
self._offset = page * paginate_by
@returns_clone
def distinct(self, is_distinct=True):
self._distinct = is_distinct
@returns_clone
def for_update(self, for_update=True, nowait=False):
self._for_update = (for_update, nowait)
@returns_clone
def naive(self, naive=True):
self._naive = naive
@returns_clone
def tuples(self, tuples=True):
self._tuples = tuples
@returns_clone
<FILEB>
<CHANGES>
inc_dirs += ['/usr/5include']
<CHANGEE>
<FILEE>
<FILEB>
# Steen Lumholt's termios module
exts.append( Extension('termios', ['termios.c']) )
# Jeremy Hylton's rlimit interface
if platform not in ['cygwin']:
exts.append( Extension('resource', ['resource.c']) )
# Generic dynamic loading module
exts.append( Extension('dl', ['dlmodule.c']) )
# Sun yellow pages. Some systems have the functions in libc.
if platform not in ['cygwin']:
if (self.compiler.find_library_file(lib_dirs, 'nsl')):
libs = ['nsl']
else:
libs = []
exts.append( Extension('nis', ['nismodule.c'],
libraries = libs) )
# Curses support, requiring the System V version of curses, often
# provided by the ncurses library.
if platform == 'sunos4':
<CHANGES>
include_dirs += ['/usr/5include']
<CHANGEE>
lib_dirs += ['/usr/5lib']
if (self.compiler.find_library_file(lib_dirs, 'ncurses')):
curses_libs = ['ncurses']
exts.append( Extension('_curses', ['_cursesmodule.c'],
libraries = curses_libs) )
elif (self.compiler.find_library_file(lib_dirs, 'curses')):
if (self.compiler.find_library_file(lib_dirs, 'terminfo')):
curses_libs = ['curses', 'terminfo']
else:
curses_libs = ['curses', 'termcap']
exts.append( Extension('_curses', ['_cursesmodule.c'],
libraries = curses_libs) )
<FILEE>
<SCANS>.jclark.com/pub/xml/expat.zip.
#
# EXPAT_DIR, below, should point to the expat/ directory created by
# unpacking the Expat source distribution.
#
# Note: the expat build process doesn't yet build a libexpat.a; you
# can do this manually while we try to convince the author to add it. To
# do so, cd to EXPAT_DIR, run "make" if you have not done so, then
# run:
#
# ar cr libexpat.a xmltok/*.o xmlparse/*.o
#
expat_defs = []
expat_incs = find_file('expat.h', inc_dirs, [])
if expat_incs is not None:
# expat.h was found
expat_defs = [('HAVE_EXPAT_H', 1)]
else:
expat_incs = find_file('xmlparse.h', inc_dirs, [])
if (expat_incs is not None and
self.compiler.find_library_file(lib_dirs, 'expat')):
exts.append( Extension('pyexpat', ['pyexpat.c'],
define_macros = expat_defs,
libraries = ['expat']) )
# Platform-specific libraries
if platform == 'linux2':
# Linux-specific modules
exts.append( Extension('linuxaudiodev', ['linuxaudiodev.c']) )
if platform == 'sunos5':
# SunOS specific modules
exts.append( Extension('sunaudiodev', ['sunaudiodev.c']) )
self.extensions.extend(exts)
# Call the method for detecting whether _tkinter can be compiled
self.detect_tkinter(inc_dirs, lib_dirs)
def detect_tkinter(self, inc_dirs, lib_dirs):
# The _tkinter module.
#
# The command for _tkinter is long and site specific. Please
# uncomment and/or edit those parts as indicated. If you don't have a
# specific extension (e.g. Tix or BLT), leave the corresponding line
# commented out. (Leave the trailing backslashes in! If you
# experience strange errors, you may want to join all uncommented
# lines and remove the backslashes -- the backslash interpretation is
# done by the shell's "read" command and it may not be implemented on
# every system.)
# Assume we haven't found any of the libraries or include files
tcllib = tklib = tcl_includes = tk_includes = None
for version in ['8.4', '8.3', '8.2', '8.1', '8.0
<FILEB>
<CHANGES>
if share0s: self.send_share0s(hashes=share0s)
<CHANGEE>
<FILEE>
<FILEB>
self.node.handle_share(share, self)
def send_shares(self, shares, full=False):
share1bs = []
share0s = []
share1as = []
# XXX doesn't need to send full block when it's not urgent
# eg. when getting history
for share in shares:
if share.hash <= share.header['target']:
share1bs.append(share.as_share1b())
else:
if self.mode == 0 and not full:
share0s.append(share.hash)
elif self.mode == 1 or full:
share1as.append(share.as_share1a())
else:
raise ValueError(self.mode)
if share1bs: self.send_share1bs(share1bs=share1bs)
<CHANGES>
if share0s: self.send_share0s(share0s=share0s)
<CHANGEE>
if share1as: self.send_share1as(share1as=share1as)
def connectionLost(self, reason):
if self.node_var_watch is not None:
self.node.mode_var.changed.unwatch(self.node_var_watch)
if self.connected2:
self.node.lost_conn(self)
class ServerFactory(protocol.ServerFactory):
def __init__(self, node):
self.node = node
def buildProtocol(self, addr):
p = Protocol(self.node)
p.factory = self
<FILEE>
<SCANS> %s:%i' % (conn.transport.getPeer().host, conn.transport.getPeer().port)
def lost_conn(self, conn):
if conn.nonce not in self.peers:
raise ValueError('''don't have peer''')
if conn is not self.peers[conn.nonce]:
raise ValueError('wrong conn')
del self.peers[conn.nonce]
print 'Lost peer %s:%i' % (conn.transport.getPeer().host, conn.transport.getPeer().port)
def got_addr(self, (host, port), services, timestamp):
if (host, port) in self.addr_store:
old_services, old_first_seen, old_last_seen = self.addr_store[host, port]
self.addr_store[host, port] = services, old_first_seen, max(old_last_seen, timestamp)
else:
self.addr_store[host, port] = services, timestamp, timestamp
def handle_share(self, share, peer):
print 'handle_share', (share, peer)
def handle_share_hash(self, hash_, peer):
print 'handle_share_hash', (hash_, peer)
def handle_get_shares(self, hashes, parents, stops, peer):
print 'handle_get_shares', (hashes, parents, stops, peer)
if __name__ == '__main__':
p = random.randrange(2**15, 2**16)
for i in xrange(5):
p2 = random.randrange(2**15, 2**16)
print p, p2
n = Node(p2, True, {addrdb_key.pack(dict(address='127.0.0.1', port=p)): addrdb_value.pack(dict(services=0, first_seen=int(time.time())-10, last_seen=int(time.time())))})
n.start()
p = p2
reactor.run()
<FILEB>
<CHANGES>
raise errors.InconsistentDatabaseError("Bad number of files (%d) " \
<CHANGEE>
<FILEE>
<FILEB>
# Use the existing DB connection, or open a new one if None was provided
db = existdb or database.Database()
db.connect()
select = db.select([db.parfiles.c.filename, \
db.parfiles.c.filepath, \
db.parfiles.c.md5sum]).\
where(db.parfiles.c.parfile_id==parfile_id)
result = db.execute(select)
rows = result.fetchall()
result.close()
if not existdb:
# Close the DB connection we opened
db.close()
if len(rows) == 1:
filename = rows[0]['filename']
filepath = rows[0]['filepath']
md5sum_DB = rows[0]['md5sum']
else:
<CHANGES>
raise errors.IncosistentDatabaseError("Bad number of files (%d) " \
<CHANGEE>
"with parfile_id=%d" % (len(rows), parfile_id))
fullpath = os.path.join(filepath,filename)
# Make sure the file exists
Verify_file_path(fullpath)
if verify_md5:
print_info("Confirming MD5 sum of %s matches what is " \
"stored in DB (%s)" % (fullpath, md5sum_DB), 2)
md5sum_file = Get_md5sum(fullpath)
if md5sum_DB != md5sum_file:
raise errors.FileError("md5sum check of %s failed! MD5 from " \
"DB (%s) != MD5 from file (%s)" % \
(fullpath, md5sum_DB, md5sum_file))
<FILEE>
<SCANS>2)"""
"""Output:"""
"""md5: The hexidecimal string of the MD5 checksum."""
f = open(fname, 'rb')
md5 = hashlib.md5()
block = f.read(block_size)
while block:
md5.update(block)
block = f.read(block_size)
f.close()
return md5.hexdigest()
def get_master_parfile(pulsar_id):
"""Given a pulsar ID number return the full path"""
"""to that pulsar's master parfile. If no master parfile"""
"""exists return None."""
"""Input:"""
"""pulsar_id: The pulsar ID number to get a master parfile for."""
"""Output:"""
"""masterpar_id: The master parfile's parfile_id value, or"""
"""None if no master parfile exists."""
"""fn: The master parfile's full path, or None if no master"""
"""parfile exists."""
db = database.Database()
db.connect()
select = db.select([db.master_parfiles.c.parfile_id, \
db.parfiles.c.filepath, \
db.parfiles.c.filename], \
(db.master_parfiles.c.parfile_id==db.parfiles.c.parfile_id) & \
(db.master_parfiles.c.pulsar_id==pulsar_id))
result = db.execute(select)
rows = db.fetchall()
result.close()
db.close()
if len(rows) > 1:
raise errors.InconsistentDatabaseError("There are too many (%d) " \
"master parfiles for pulsar #%d" % \
(len(rows), pulsar_id ))
elif len(rows) == 0:
return None, None
else:
row = rows[0]
if row['filepath'] is None or row['filename'] is None:
return None, None
else:
return row['parfile_id'], \
os.path.join(row['filepath'], row['filename'])
def get_master_template(pulsar_id, obssystem_id):
"""Given a pulsar ID number, and observing system ID number"""
"""return the full path to the appropriate master template, """
"""and its ID number. If no master template exists return"""
"""None."""
"""Inputs:"""
"""pulsar_id: The pulsar ID number."""
"""obssystem_id: The observing system ID number."""
"""Outputs:"""
"""mastertmp_id: The master template's template_id value, or"""
"""None if no master template exists for the pulsar/obssystem"""
"""combination provided."""
"""fn: The master template's full
<FILEB>
<CHANGES>
detach_dids(scope=parent_did.scope, name=parent_did.name, dids=[{'scope': parent_did.child_scope, 'name': parent_did.child_name}], session=session)
<CHANGEE>
<FILEE>
<FILEB>
if not event_type:
add_message(event_type, {'account': account,
'scope': did['scope'],
'name': did['name']},
session=session)
# Delete rules on did
if rule_id_clause:
with record_timer_block('undertaker.rules'):
for (rule_id, scope, name, rse_expression, ) in session.query(models.ReplicationRule.id,
models.ReplicationRule.scope,
models.ReplicationRule.name,
models.ReplicationRule.rse_expression).filter(or_(*rule_id_clause)):
logging.debug('Removing rule %s for did %s:%s on RSE-Expression %s' % (str(rule_id), scope, name, rse_expression))
rucio.core.rule.delete_rule(rule_id=rule_id, nowait=True, session=session)
# Detach from parent dids:
if parent_content_clause:
with record_timer_block('undertaker.parent_content'):
for parent_did in session.query(models.DataIdentifierAssociation).filter(or_(*parent_content_clause)):
<CHANGES>
detach_dids(scope=parent_did.scope, name=parent_did.name, dids=[{'scope': parent_did.child_Scope, 'name': parent_did.child_name}], sesson=session)
<CHANGEE>
# Remove content
if content_clause:
with record_timer_block('undertaker.content'):
rowcount = session.query(models.DataIdentifierAssociation).filter(or_(*content_clause)).\
delete(synchronize_session=False)
record_counter(counters='undertaker.content.rowcount', delta=rowcount)
# remove data identifier
with record_timer_block('undertaker.dids'):
rowcount = session.query(models.DataIdentifier).filter(or_(*did_clause)).\
filter(or_(models.DataIdentifier.did_type == DIDType.CONTAINER, models.DataIdentifier.did_type == DIDType.DATASET)).\
delete(synchronize_session=False)
@transactional_session
<FILEE>
<SCANS> """:param scope: The scope."""
""":param name: The name."""
""":param session: The database session."""
""":returns: List of dids."""
""":rtype: Generator."""
query = session.query(models.DataIdentifierAssociation.scope,
models.DataIdentifierAssociation.name,
models.DataIdentifierAssociation.did_type).filter_by(child_scope=scope, child_name=name)
for did in query.yield_per(5):
yield {'scope': did.scope, 'name': did.name, 'type': did.did_type}
list_all_parent_dids(scope=did.scope, name=did.name, session=session)
@stream_session
def list_child_datasets(scope, name, session=None):
"""List all child datasets of a container."""
""":param scope: The scope."""
""":param name: The name."""
""":param session: The database session"""
""":returns: List of dids"""
""":rtype: Generator"""
query = session.query(models.DataIdentifierAssociation.child_scope,
models.DataIdentifierAssociation.child_name,
models.DataIdentifierAssociation.child_type).filter(models.DataIdentifierAssociation.scope == scope,
models.DataIdentifierAssociation.name == name,
models.DataIdentifierAssociation.child_type != DIDType.FILE)
query = query.with_hint(models.DataIdentifierAssociation, "INDEX(CONTENTS CONTENTS_PK)", 'oracle')
for child_scope, child_name, child_type in query.yield_per(5):
if child_type == DIDType.CONTAINER:
list_child_datasets(scope=child_scope, name=child_name, session=session)
else:
yield {'scope': child_scope, 'name': child_name, 'type': child_type}
@stream_session
def list_files(scope, name, long=False, session=None):
"""List data identifier file contents."""
""":param scope: The scope name."""
""":param name: The data identifier name."""
""":param long: A boolean to choose if GUID is returned or not."""
""":param session: The
<FILEB>
<CHANGES>
return self.field.blank and mark_safe(' class="optional"') or ''
<CHANGEE>
<FILEE>
<FILEB>
cls.nodelists[klass] = nodelist
return nodelist
else:
return cls.nodelists[klass]
get_nodelist = classmethod(get_nodelist)
def render(self, context):
bound_field = self.bound_field_var.resolve(context)
context.push()
context['bound_field'] = bound_field
output = self.get_nodelist(bound_field.field.__class__).render(context)
context.pop()
return output
class FieldWrapper(object):
def __init__(self, field ):
self.field = field
def needs_header(self):
return not isinstance(self.field, models.AutoField)
def header_class_attribute(self):
<CHANGES>
return self.field.blank and ' class="optional"' or ''
<CHANGEE>
def use_raw_id_admin(self):
return isinstance(self.field.rel, (models.ManyToOneRel, models.ManyToManyRel)) \
and self.field.rel.raw_id_admin
class FormFieldCollectionWrapper(object):
def __init__(self, field_mapping, fields, index):
self.field_mapping = field_mapping
self.fields = fields
self.bound_fields = [AdminBoundField(field, self.field_mapping, field_mapping['original'])
for field in self.fields]
self.index = index
class TabularBoundRelatedObject(BoundRelatedObject):
def __init__(self, related_object, field_mapping, original):
<FILEE>
<SCANS>from django import template
from django.contrib.admin.views.main import AdminBoundField
from django.template import loader
from django.utils.text import capfirst
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
from django.utils.html import escape
from django.db import models
from django.db.models.fields import Field
from django.db.models.related import BoundRelatedObject
from django.conf import settings
import re
register = template.Library()
word_re = re.compile('[A-Z][a-z]+')
absolute_url_re = re.compile(r'^(?:http(?:s)?:/)?/', re.IGNORECASE)
def class_name_to_underscored(name):
return u'_'.join([s.lower() for s in word_re.findall(name)[:-1]])
def include_admin_script(script_path):
"""Returns an HTML script element for including a script from the admin"""
"""media url (or other location if an absolute url is given)."""
"""Example usage::"""
"""{% include_admin_script "js/calendar.js" %}"""
"""could return::"""
"""<script type="text/javascript" src="/media/admin/js/calendar.js">"""
if not absolute_url_re.match(script_path):
script_path = '%s%s' % (settings.ADMIN_MEDIA_PREFIX, script_path)
return mark_safe(u'<script type="text/javascript" src="%s"></script>'
% script_path)
include_admin_script = register.simple_tag(include_admin_script)
def submit_row(context):
opts = context['opts']
change = context['change']
is_popup = context['is_popup']
return {
'onclick_attrib': (opts.get_ordered_objects() and change
and 'onclick="submitOrderForm();"' or ''),
'show_delete_link': (not is_popup and context['has_delete_permission']
and (change or context['show_delete'])),
'show_save_as_new': not is_popup and change and opts.admin.save_as,
'show_save_and_add_another': not is_popup and (not opts.admin.save_as or context['add']),
'show_save_and_continue': not is_popup and context['has_change_permission'],
'show_save': True
}
submit_row = register.inclusion_tag('admin/submit_line.html', takes_context=True)(submit_row)
def field_label(bound_field):
class_names = []
if isinstance(bound_field.field, models.BooleanField):
class_names.append("vCheckboxLabel")
colon = ""
else:
if not bound_field.field.blank:
class_names.append('required')
if not bound_field.first
<FILEB>
<CHANGES>
invalids.update(c for c in chlist if isinstance(c, discord.TextChannel))
<CHANGEE>
<FILEE>
<FILEB>
chlist.difference_update(invalids)
self.config.bound_channels.difference_update(invalids)
if chlist:
log.info("Bound to text channels:")
[log.info(' - {}/{}'.format(ch.guild.name.strip(), ch.name.strip())) for ch in chlist if ch]
else:
print("Not bound to any text channels")
if invalids and self.config.debug_mode:
print(flush=True)
log.info("Not binding to voice channels:")
[log.info(' - {}/{}'.format(ch.guild.name.strip(), ch.name.strip())) for ch in invalids if ch]
print(flush=True)
else:
log.info("Not bound to any text channels")
if self.config.autojoin_channels:
chlist = set(self.get_channel(i) for i in self.config.autojoin_channels if i)
chlist.discard(None)
invalids = set()
<CHANGES>
invalids.update(c for c in chlist if c.type == discord.ChannelType.text)
<CHANGEE>
chlist.difference_update(invalids)
self.config.autojoin_channels.difference_update(invalids)
if chlist:
log.info("Autojoining voice chanels:")
[log.info(' - {}/{}'.format(ch.guild.name.strip(), ch.name.strip())) for ch in chlist if ch]
else:
log.info("Not autojoining any voice channels")
if invalids and self.config.debug_mode:
print(flush=True)
log.info("Cannot autojoin text channels:")
[log.info(' - {}/{}'.format(ch.guild.name.strip(), ch.name.strip())) for ch in invalids if ch]
self.autojoin_channels = chlist
<FILEE>
<SCANS>all':
requested_cats = cats
else:
requested_cats = [cat] + [c.strip(',') for c in leftover_args]
data = ['Your ID: %s' % author.id]
for cur_cat in requested_cats:
rawudata = None
if cur_cat == 'users':
data.append("\nUser IDs:")
rawudata = ['%s #%s: %s' % (m.name, m.discriminator, m.id) for m in guild.members]
elif cur_cat == 'roles':
data.append("\nRole IDs:")
rawudata = ['%s: %s' % (r.name, r.id) for r in guild.roles]
elif cur_cat == 'channels':
data.append("\nText Channel IDs:")
tchans = [c for c in guild.channels if isinstance(c, discord.TextChannel)]
rawudata = ['%s: %s' % (c.name, c.id) for c in tchans]
rawudata.append("\nVoice Channel IDs:")
vchans = [c for c in guild.channels if isinstance(c, discord.VoiceChannel)]
rawudata.extend('%s: %s' % (c.name, c.id) for c in vchans)
if rawudata:
data.extend(rawudata)
with BytesIO() as sdata:
sdata.writelines(d.encode('utf8') + b'\n' for d in data)
sdata.seek(0)
# TODO: Fix naming (Discord20API-ids.txt)
await author.send(file=discord.File(sdata, filename='%s-ids-%s.txt' % (guild.name.replace(' ', '_'), cat)))
return Response("Sent a message with a list of IDs.", delete_after=20)
async def cmd_perms(self, author, user_mentions, channel, guild, permissions):
"""Usage:"""
"""{command_prefix}perms [@user]"""
"""Sends the user a list of their permissions, or the permissions of the user specified."""
lines = ['Command permissions in %s\n' % guild.name, '```', '```']
if user_mentions:
user = user_mentions[0]
permissions = self.permissions.for_user(user)
for perm in permissions.__dict__:
if perm in ['user_list'] or permissions.__dict__[perm] == set():
continue
lines.insert(len(lines) - 1, "%s: %s"
<FILEB>
<CHANGES>
percentaje = rise.split('%')[0]
<CHANGEE>
<FILEE>
<FILEB>
#obj_rent = self.browse(cr,uid,ids)[0]
debug(obj_rent)
if obj_rent:
vals = {}
is_registrated = False
current_date = parser.parse(obj_rent.rent_main_start_date).date()
current_date = current_date.replace(year=date.today().year)
for obj_historic in obj_rent.rent_main_historic_ids:
debug(current_date.isoformat())
debug(obj_historic.anual_value_date)
if obj_historic.anual_value_date == current_date.isoformat():
is_registrated = True
match_historic = obj_historic
break
#We need to update the amount_base of the rent, so we can
#charge the next part with the rate included
amount_base = obj_rent.rent_main_amount_base
rise = obj_rent.rent_main_rise
<CHANGES>
percentaje = rise('%')[0]
<CHANGEE>
prev_value = amount_base
years_val = amount_base * (1 + float(percentaje) / 100)
#obj_rent.write({'rent_amount_base' : years_val})
vals['rent_main_amount_base'] = years_val
if obj_rent.rent_related_real == 'local':
vals['anual_value_local'] = obj_rent.rent_rent_local
if not is_registrated:
vals['rent_main_historic_ids'] = [(0,0,{'anual_value_rent':obj_rent.id,'anual_value_value':years_val,'anual_value_prev_value' : prev_value,'anual_value_rate' : rise, 'anual_value_date' : current_date, 'anual_value_type' : 'main'})]
else:
vals['rent_main_historic_ids'] = [(1,match_historic.id,{'anual_value_value':amount_base,'an<SCANS>rent.rent').write(cr, uid, ids, {}, context)
return { 'value' : res}
def _rent_main_performance(self,cr,uid,ids,field_name,args,context):
res = {}
#for obj_rent in self.pool.get('rent.rent').browse(cr,uid,ids):
# total = 1
# if obj_rent.rent_main_total:
# res[obj_rent.id] = "%.2f%%" % ((obj_rent.rent_main_amount_base * 12) / ( or ) * 100)
return 0
def _rent_main_amount_years(self,cr,uid,ids,field_name,args,contexto):
res = {}
for obj_rent in self.pool.get('rent.rent').browse(cr,uid,ids):
years_val = {}
currency_id = obj_rent.currency_id
percentaje = obj_rent.rent_main_rise.split('%')[0]
years_val['rent_main_rise_year2'] = obj_rent.rent_main_amount_base * (1 + float(percentaje) / 100)
years_val['rent_main_rise_year3'] = years_val['rent_main_rise_year2'] * (1 + float(percentaje) / 100)
years_val['rent_main_rise_year2d'] = years_val['rent_main_rise_year2'] / currency_id.rate
years_val['rent_main_rise_year3d'] = years_val['rent_main_rise_year3'] / currency_id.rate
#Just to avoid using a separate function
years_val['rent_main_amountd_base'] = obj_rent.rent_main_amount_base / currency_id.rate
res[obj_rent.id] = years_val
return res
_columns = {
'name' : fields.char('Name',size=64),
'rent_rent_
<FILEB>
<CHANGES>
root = self.instPath, stderr = '/dev/null')
<CHANGEE>
<FILEE>
<FILEB>
self.log("copying %s to %s" % (fromFile, to))
os.rename(fromFile, to)
else:
self.log("missing DD module %s (this may be okay)" %
fromFile)
def depmodModules(self):
kernelVersions = []
if (self.hdList.has_key('kernel-smp') and
self.hdList['kernel-smp'].selected):
version = (self.hdList['kernel-smp']['version'] + "-" +
self.hdList['kernel-smp']['release'] + "smp")
kernelVersions.append(version)
version = (self.hdList['kernel']['version'] + "-" +
self.hdList['kernel']['release'])
kernelVersions.append(version)
for version in kernelVersions:
iutil.execWithRedirect ("/sbin/depmod",
[ "/sbin/depmod", "-a", version ],
<CHANGES>
root = self.instPath)
<CHANGEE>
def writeConfiguration(self):
self.writeLanguage ()
self.writeMouse ()
self.writeKeyboard ()
self.writeNetworkConfig ()
self.setupAuthentication ()
self.writeRootPassword ()
self.createAccounts ()
self.writeTimezone()
def doInstall(self):
# make sure we have the header list and comps file
self.getHeaderList()
<FILEE>
<SCANS>floppy"
isys.makeDevInode(device, file)
try:
fd = os.open(file, os.O_RDONLY)
except:
raise RuntimeError, "boot disk creation failed"
os.close(fd)
kernel = self.hdList['kernel']
kernelTag = "-%s-%s" % (kernel['version'], kernel['release'])
w = self.intf.waitWindow (_("Creating"), _("Creating boot disk..."))
self.setFdDevice ()
rc = iutil.execWithRedirect("/sbin/mkbootdisk",
[ "/sbin/mkbootdisk",
"--noprompt",
"--device",
self.fdDevice,
kernelTag[1:] ],
stdout = None, stderr = None,
searchPath = 1, root = self.instPath)
w.pop()
if rc:
raise RuntimeError, "boot disk creation failed"
def freeHeaderList(self):
if (self.hdList):
self.hdList = None
def getHeaderList(self):
if (not self.hdList):
w = self.intf.waitWindow(_("Reading"),
_("Reading package information..."))
self.hdList = self.method.readHeaders()
w.pop()
return self.hdList
def getCompsList(self):
if (not self.comps):
self.getHeaderList()
self.comps = self.method.readComps(self.hdList)
for comp in self.comps:
if comp.selected:
comp.select (1)
self.comps['Base'].select(1)
self.updateInstClassComps()
return self.comps
def updateInstClassComps(self):
# don't load it just for this
if (not self.comps): return
group = self.instClass.getGroups()
packages = self.instClass.getPackages()
if (group == None and packages == None): return 0
for n in self.comps.keys():
self.comps[n].unselect(0)
self.comps['Base'].select(1)
if group:
for n in group:
self.comps[n].select(1)
if packages:
for n in packages:
self.selectPackage(n)
if self.x.server:
self.selectPackage('XFree86-' + self.x.server)
def selectPackage(self, package):
if not self.hdList.packages.has_key(package):
str = "package %s is not available" % (package,)
raise ValueError, str
self.hdList.packages[package].selected = 1
def writeNetworkConfig (self):
# /etc/sysconfig/network-scripts/ifcfg-*
for dev in self.network.netdevices.values ():
device = dev.get ("device")
f = open (self.instPath + "/etc/sysconfig/network-scripts/ifcfg-" + device, "w")
f.write (
<FILEB>
<CHANGES>
class UDPTimeoutTest(SocketUDPTest):
<CHANGEE>
<FILEE>
<FILEB>
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
<CHANGES>
class UDPTimeoutTest(SocketTCPTest):
<CHANGEE>
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
<FILEE>
<SCANS>#!/usr/bin/env python3
import unittest
from test import support
import errno
import io
import socket
import select
import time
import traceback
import queue
import sys
import os
import array
import platform
import contextlib
from weakref import proxy
import signal
import math
try:
import fcntl
except ImportError:
fcntl = False
def try_address(host, port=0, family=socket.AF_INET):
"""Try to bind a socket on the given host:port and return True"""
"""if that has been possible."""
try:
sock = socket.socket(family, socket.SOCK_STREAM)
sock.bind((host, port))
except (socket.error, socket.gaierror):
return False
else:
sock.close()
return True
def linux_version():
try:
# platform.release() is something like '2.6.33.7-desktop-2mnb'
version_string = platform.release().split('-')[0]
return tuple(map(int, version_string.split('.')))
except ValueError:
return 0, 0, 0
HOST = support.HOST
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf8') ## test unicode string and carriage return
SUPPORTS_IPV6 = socket.has_ipv6 and try_address('::1', family=socket.AF_INET6)
try:
import _thread as thread
import threading
except ImportError:
thread = None
threading = None
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.serv.listen(1)
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = support.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class ThreadableTest:
"""Threadable Test class"""
"""The ThreadableTest class makes it easy to create a threaded"""
"""client/server pair from an existing unit test. To create a"""
"""new threaded class from an existing unit test, use multiple"""
"""inheritance:"""
"""class NewClass (OldClass, ThreadableTest):"""
"""pass"""
"""This class defines two new fixture functions with obvious"""
"""purposes for overriding:"""
"""clientSetUp ()"""
"""clientTearDown ()"""
"""Any new test functions within the class must then define"""
"""tests in pairs, where the test name is preceeded with a"""
"""'_' to indicate the client portion of the test. Ex:"""
"""def testFoo(self):"""
"""# Server portion"""
"""def _testFoo(self):"""
"""# Client portion"""
"""Any exceptions raised by the clients during their tests"""
"""are caught and transferred to the main thread to alert"""
"""the testing framework."""
"""Note, the server setup function cannot
<FILEB>
<CHANGES>
yield d.build()
<CHANGEE>
<FILEE>
<FILEB>
"""@type root: L{Element}"""
"""@param definitions: A definitions object."""
"""@type definitions: L{Definitions}"""
WObject.__init__(self, root, definitions)
self.location = root.get('location')
self.ns = root.get('namespace')
self.imported = None
pmd = self.__metadata__.__print__
pmd.wrappers['imported'] = repr
@defer.inlineCallbacks
def load(self, definitions):
"""Load the object by opening the URL """
url = self.location
log.debug('importing (%s)', url)
if '://' not in url:
url = urljoin(definitions.url, url)
options = definitions.options
d = Definitions(url, options)
<CHANGES>
yield d.open(url)
<CHANGEE>
if d.root.match(Definitions.Tag, wsdlns):
self.import_definitions(definitions, d)
return
if d.root.match(Schema.Tag, Namespace.xsdns):
self.import_schema(definitions, d)
return
raise Exception('document at "%s" is unknown' % url)
def import_definitions(self, definitions, d):
"""import/merge wsdl definitions """
definitions.types += d.types
definitions.messages.update(d.messages)
definitions.port_types.update(d.port_types)
<FILEE>
<SCANS> object."""
"""@type name: str"""
"""@ivar qname: The I{qualified} name of the object."""
"""@type qname: (name, I{namespace-uri})."""
def __init__(self, root, definitions):
"""@param root: An XML root element."""
"""@type root: L{Element}"""
"""@param definitions: A definitions object."""
"""@type definitions: L{Definitions}"""
WObject.__init__(self, root, definitions)
self.name = root.get('name')
self.qname = (self.name, definitions.tns[1])
pmd = self.__metadata__.__print__
pmd.wrappers['qname'] = repr
class Definitions(WObject):
"""Represents the I{root} container of the WSDL objects as defined"""
"""by <wsdl:definitions/>"""
"""@ivar id: The object id."""
"""@type id: str"""
"""@ivar options: An options dictionary."""
"""@type options: L{options.Options}"""
"""@ivar url: The URL used to load the object."""
"""@type url: str"""
"""@ivar tns: The target namespace for the WSDL."""
"""@type tns: str"""
"""@ivar schema: The collective WSDL schema object."""
"""@type schema: L{SchemaCollection}"""
"""@ivar children: The raw list of child objects."""
"""@type children: [L{WObject},...]"""
"""@ivar imports: The list of L{Import} children."""
"""@type imports: [L{Import},...]"""
"""@ivar messages: The dictionary of L{Message} children key'd by I{qname}"""
"""@type messages: [L{Message},...]"""
"""@ivar port_types: The dictionary of L{PortType} children key'd by I{qname}"""
"""@type port_types: [L{PortType},...]"""
"""@ivar bindings: The dictionary of L{Binding} children key'd by I{qname}"""
"""@type bindings: [L{Binding},...]"""
"""@ivar service: The service object."""
"""@type service: L{Service}"""
Tag = 'definitions'
def __init__(self, url, options):
"""@param url: A URL to the WSDL."""
"""@type url: str"""
"""@param options: An options dictionary."""
"""@type options: L{options.Options}"""
WObject.__init__(self, root = None)
self.id = objid(self)
self.options = options
self.types = []
self.schema = None
self.children = []
self.imports = []
self.messages = {}
self.port_types = {}
self.bindings = {}
self.services = []
self.url = url
pmd = self.__metadata__.__print__
pmd.excludes.append('children')
pmd.excludes.append('wsdl')
pmd.wrappers['schema'] = repr
@defer.inlineCallbacks
def build(self):
log.debug('reading wsdl at: %
<FILEB>
<CHANGES>
self.assertContains(response, b"Currently")
<CHANGEE>
<FILEE>
<FILEB>
self.client.logout()
def test_inline_file_upload_edit_validation_error_post(self):
"""Test that inline file uploads correctly display prior data (#10002)."""
post_data = {
"name": "Test Gallery",
"pictures-TOTAL_FORMS": "2",
"pictures-INITIAL_FORMS": "1",
"pictures-MAX_NUM_FORMS": "0",
"pictures-0-id": six.text_type(self.picture.id),
"pictures-0-gallery": six.text_type(self.gallery.id),
"pictures-0-name": "Test Picture",
"pictures-0-image": "",
"pictures-1-id": "",
"pictures-1-gallery": str(self.gallery.id),
"pictures-1-name": "Test Picture 2",
"pictures-1-image": "",
}
response = self.client.post('/test_admin/%s/admin_views/gallery/%d/' % (self.urlbit, self.gallery.id), post_data)
<CHANGES>
self.assertTrue(response._container[0].find("Currently:") > -1)
<CHANGEE>
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class AdminInlineTests(TestCase):
urls = "admin_views.urls"
fixtures = ['admin-views-users.xml']
def setUp(self):
self.post_data = {
"name": "Test Name",
"widget_set-TOTAL_FORMS": "3",
"widget_set-INITIAL_FORMS": "0",
"widget_set-MAX_NUM_FORMS": "0",
"widget_set-0-id": "",
"widget_set-0-owner": "1",
<FILEE>
<SCANS>adduser_login)
self.assertRedirects(login, '/test_admin/admin/')
self.assertFalse(login.context)
self.client.get('/test_admin/admin/logout/')
# Change User
response = self.client.get('/test_admin/admin/')
self.assertEqual(response.status_code, 200)
login = self.client.post('/test_admin/admin/', self.changeuser_login)
self.assertRedirects(login, '/test_admin/admin/')
self.assertFalse(login.context)
self.client.get('/test_admin/admin/logout/')
# Delete User
response = self.client.get('/test_admin/admin/')
self.assertEqual(response.status_code, 200)
login = self.client.post('/test_admin/admin/', self.deleteuser_login)
self.assertRedirects(login, '/test_admin/admin/')
self.assertFalse(login.context)
self.client.get('/test_admin/admin/logout/')
# Regular User should not be able to login.
response = self.client.get('/test_admin/admin/')
self.assertEqual(response.status_code, 200)
login = self.client.post('/test_admin/admin/', self.joepublic_login)
self.assertEqual(login.status_code, 200)
self.assertContains(login, ERROR_MESSAGE)
# Requests without username should not return 500 errors.
response = self.client.get('/test_admin/admin/')
self.assertEqual(response.status_code, 200)
login = self.client.post('/test_admin/admin/', self.no_username_login)
self.assertEqual(login.status_code, 200)
form = login.context[0].get('form')
self.assertEqual(form.errors['username'][0], 'This field is required.')
def testLoginSuccessfullyRedirectsToOriginalUrl(self):
response = self.client.get('/test_admin/admin/')
self.assertEqual(response.status_code, 200)
query_string = 'the-answer=42'
redirect_url = '/test_admin/admin/?%s' % query_string
new_next = {REDIRECT_FIELD_NAME: redirect_url}
login = self.client.post('/test_admin/admin/', dict(self.super_login, **new_next), QUERY_STRING=query_string)
self.assertRedirects(login, redirect_url)
def testDoubleLoginIsNotAllowed(self
<FILEB>
<CHANGES>
return [pygpu.get_include(), np.get_include()] + other_dirs
<CHANGEE>
<FILEE>
<FILEB>
"""}""" % {'name': name}
def c_init_code(self):
# We don't actually need the numpy API except in
# HostFromGpu and GpuFromHost and those cases will be covered
# by the TensorType parameter
return ['import_pygpu__gpuarray();']
def c_headers(self):
# We need arrayobject for the PyArrayDescr struct def
# (even if we just use a pointer to it in a function def)
return ['<gpuarray/array.h>', '<gpuarray/kernel.h>',
'<gpuarray/error.h>', '<gpuarray/buffer.h>',
'<gpuarray/buffer_blas.h>', '<numpy/arrayobject.h>',
'<gpuarray_api.h>']
def c_header_dirs(self):
other_dirs = []
alt_inc_dir = os.path.abspath(os.path.normpath(sys.exec_prefix + '/Library/include'))
if os.path.exists(alt_inc_dir) and os.path.isdir(alt_inc_dir):
other_dirs.append(alt_inc_dir)
<CHANGES>
return [pygpu.get_include(), numpy.get_include()] + other_dirs
<CHANGEE>
def c_lib_dirs(self):
alt_lib_dir = os.path.abspath(os.path.normpath(sys.exec_prefix + '/Library/lib'))
if os.path.exists(alt_lib_dir) and os.path.isdir(alt_lib_dir):
return [alt_lib_dir]
return []
def c_libraries(self):
return ['gpuarray']
def c_code_cache_version(self):
ver = pygpu.gpuarray.abi_version()
# we only use the major version since the minor revisions are compatible.
return (2, ver[0])
class _operators(_tensor_py_operators):
<FILEE>
<SCANS>SetString(PyExc_TypeError, "expected a GpuContext");"""
"""%(fail)s"""
"""}""" % dict(name=name, fail=sub['fail'])
else:
res = ""
return res + """%(name)s = (PyGpuContextObject *)py_%(name)s;"""
"""Py_INCREF(%(name)s);""" % dict(name=name)
def c_cleanup(self, name, sub):
return "Py_XDECREF(%(name)s); %(name)s = NULL;" % dict(name=name)
# c_sync is intentionally not declared to prevent normal usage
def c_init_code(self):
return ['import_pygpu__gpuarray();']
def c_headers(self):
return ['<gpuarray_api.h>']
def c_header_dirs(self):
return [pygpu.get_include()]
def c_code_cache_version(self):
ver = pygpu.gpuarray.api_version()
return (0, ver[0])
# Variable, Constant, ... not declared
"""Instance of :class:`GpuContextType` to use for the context_type"""
"""declaration of an operation."""
gpu_context_type = GpuContextType()
# THIS WORKS But GpuArray instances don't compare equal to one
# another, and what about __hash__ ? So the unpickled version doesn't
# equal the pickled version, and the cmodule cache is not happy with
# the situation. The old back-end has this same comment and uses the
# same mechanism.
def GpuArray_unpickler(npa, ctx_name):
if config.experimental.unpickle_gpu_on_cpu:
# directly return numpy array
warnings.warn(
"config.experimental.unpickle_gpu_on_cpu is set to True. "
"Unpickling GpuArray as numpy.ndarray")
return npa
elif pygpu:
ctx = get_context(ctx_name)
return pygpu.gpuarray.array(npa, copy=True, context=ctx)
else:
raise ImportError("pygpu not found. Cannot unpickle GpuArray")
copyreg.constructor(GpuArray_unpickler)
def GpuArray_pickler(cnda):
ctx_name = _name_for_ctx(cnda.context)
return (GpuArray_unpickler, (np.asarray(cnda), ctx_name))
# In case pygpu is not imported.
if pygpu is not None:
copy
<FILEB>
<CHANGES>
assert isinstance(element, (c.Comment, c.Statement, c.Value))
<CHANGEE>
<FILEE>
<FILEB>
return "<%s (%d, %d, %d)>" % (self.__class__.__name__, len(self.header),
len(self.body), len(self.footer))
@property
def ccode(self):
body = tuple(s.ccode for s in self.body)
return c.Module(self.header + (self._wrapper(body),) + self.footer)
@property
def children(self):
return (self.body,)
class List(Block):
"""Class representing a sequence of one or more statements."""
is_List = True
_wrapper = c.Collection
class Element(Node):
"""A generic node that is worth identifying in an Iteration/Expression tree."""
"""It corresponds to a single :class:`cgen.Statement`."""
is_Element = True
def __init__(self, element):
<CHANGES>
assert isinstance(element, (c.Comment, c.Statement))
<CHANGEE>
self.element = element
def __repr__(self):
return "Element::\n\t%s" % (self.element)
@property
def ccode(self):
return self.element
class Expression(Node):
"""Class encpasulating a single stencil expression"""
is_Expression = True
def __init__(self, stencil):
assert isinstance(stencil, Eq)
self.stencil = stencil
<FILEE>
<SCANS> An iterable of :class:`SymbolicData` objects in input to the"""
"""function, or ``None`` if the function takes no parameter."""
""":param prefix: An iterable of qualifiers to prepend to the function declaration."""
"""The default value is ('static', 'inline')."""
is_Function = True
_traversable = ['body']
def __init__(self, name, body, retval, parameters=None, prefix=('static', 'inline')):
self.name = name
self.body = as_tuple(body)
self.retval = retval
self.parameters = as_tuple(parameters)
self.prefix = prefix
def __repr__(self):
parameters = ",".join([c.dtype_to_ctype(i.dtype) for i in self.parameters])
body = "\n\t".join([str(s) for s in self.body])
return "Function[%s]<%s; %s>::\n\t%s" % (self.name, self.retval, parameters, body)
@property
def _cparameters(self):
"""Generate arguments signature."""
cparameters = []
for v in self.parameters:
if isinstance(v, Dimension):
cparameters.append(v.decl)
elif v.is_ScalarData:
cparameters.append(c.Value('const int', v.name))
else:
cparameters.append(c.Pointer(c.POD(v.dtype, '%s_vec' % v.name)))
return cparameters
@property
def _ccasts(self):
"""Generate data casts."""
handle = [f for f in self.parameters if isinstance(f, TensorData)]
shapes = [(f, ''.join(["[%s]" % i.ccode for i in f.indices[1:]])) for f in handle]
casts = [c.Initializer(c.POD(v.dtype, '(*%s)%s' % (v.name, shape)),
'(%s (*)%s) %s' % (c.dtype_to_ctype(v.dtype),
shape, '%s_vec' % v.name))
for v, shape in shapes]
return casts
@property
def _ctop(self):
"""Generate the function declaration."""
return c.FunctionDeclaration(c.Value(self.retval, self.name), self._cparameters)
@property
def ccode(self):
"""Generate C code for the represented C routine."""
""":returns: :class:`cgen.FunctionDeclaration` object representing the function."""
body = [e.ccode for e in self.body]
return c.FunctionBody(self._ctop, c.Block(self._ccasts + body))
@property
def children(self):
return (self.body,)
# Utilities
class TimedList(List):
"""Wrap a Node with C-level timers."""
def __init__(self, lname, gname, body):
"""Initialize a TimedList object."""
""":param lname: Timer name in the local scope."""
""":param gname: Name of the global struct tracking all timers."""
""":param body: Timed block of code."""
<FILEB>
<CHANGES>
yscrollcommand=vscrollbar.set, width=240)
<CHANGEE>
<FILEE>
<FILEB>
if not s:
return True
try:
int(s)
return True
except ValueError:
return False
class VerticalScrolledFrame(Frame):
"""A pure Tkinter vertically scrollable frame."""
"""* Use the 'interior' attribute to place widgets inside the scrollable frame"""
"""* Construct and pack/place/grid normally"""
"""* This frame only allows vertical scrolling"""
def __init__(self, parent, *args, **kw):
Frame.__init__(self, parent, *args, **kw)
# create a canvas object and a vertical scrollbar for scrolling it
vscrollbar = Scrollbar(self, orient=VERTICAL)
vscrollbar.pack(fill=Y, side=RIGHT, expand=FALSE)
canvas = Canvas(self, bd=0, highlightthickness=0,
<CHANGES>
yscrollcommand=vscrollbar.set)
<CHANGEE>
canvas.pack(side=LEFT, fill=BOTH, expand=TRUE)
vscrollbar.config(command=canvas.yview)
# reset the view
canvas.xview_moveto(0)
canvas.yview_moveto(0)
# create a frame inside the canvas which will be scrolled with it
self.interior = interior = Frame(canvas)
interior_id = canvas.create_window(0, 0, window=interior, anchor=NW)
# track changes to the canvas and frame width and sync them,
# also updating the scrollbar
def _configure_interior(event):
# update the scrollbars to match the size of the inner frame
<FILEE>
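The VerticalScrolledFrame docstring above explains its usage: pack the frame normally and place child widgets on its 'interior' attribute. A minimal usage sketch follows, assuming the module's wildcard Tk imports (Tk, Label, TOP, BOTH, TRUE) are in scope; it is illustrative only and not part of the original dialog.

root = Tk()
scrolled = VerticalScrolledFrame(root)
scrolled.pack(side=TOP, fill=BOTH, expand=TRUE)
for i in range(30):
    # children go on .interior, not on the scrolled frame itself
    Label(scrolled.interior, text='row %d' % i).pack(anchor='w')
root.mainloop()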
<SCANS>Frame(frame, borderwidth=2, relief=GROOVE,
text=' Autosave Preferences ')
frameWinSize = Frame(frame, borderwidth=2, relief=GROOVE)
frameHelp = LabelFrame(frame, borderwidth=2, relief=GROOVE,
text=' Additional Help Sources ')
#frameRun
labelRunChoiceTitle = Label(frameRun, text='At Startup')
radioStartupEdit = Radiobutton(
frameRun, variable=self.startupEdit, value=1,
command=self.SetKeysType, text="Open Edit Window")
radioStartupShell = Radiobutton(
frameRun, variable=self.startupEdit, value=0,
command=self.SetKeysType, text='Open Shell Window')
#frameSave
labelRunSaveTitle = Label(frameSave, text='At Start of Run (F5) ')
radioSaveAsk = Radiobutton(
frameSave, variable=self.autoSave, value=0,
command=self.SetKeysType, text="Prompt to Save")
radioSaveAuto = Radiobutton(
frameSave, variable=self.autoSave, value=1,
command=self.SetKeysType, text='No Prompt')
#frameWinSize
labelWinSizeTitle = Label(
frameWinSize, text='Initial Window Size (in characters)')
labelWinWidthTitle = Label(frameWinSize, text='Width')
entryWinWidth = Entry(
frameWinSize, textvariable=self.winWidth, width=3)
labelWinHeightTitle = Label(frameWinSize, text='Height')
entryWinHeight = Entry(
frameWinSize, textvariable=self.winHeight, width=3)
#frameHelp
frameHelpList = Frame(frameHelp)
frameHelpListButtons = Frame(frameHelpList)
scrollHelpList = Scrollbar(frameHelpList)
self.listHelp = Listbox(
frameHelpList, height=5, takefocus=FALSE,
exportselection=FALSE)
scrollHelpList.config(command=self.listHelp.yview)
self.listHelp.config(yscrollcommand=scrollHelpList.set)
self.listHelp.bind('<ButtonRelease-1>', self.HelpSourceSelected)
self.buttonHelpListEdit = Button(
frameHelpListButtons, text='Edit', state=DISABLED,
width=8, command=self.HelpListItemEdit)
self.buttonHelpListAdd = Button(
frameHelpListButtons, text='Add',
width=8, command=self.HelpListItemAdd)
self.buttonHelpListRemove = Button(
frameHelpListButtons, text='Remove', state=DISABLED,
width=8, command=self.HelpListItemRemove)
#widget packing
#body
frameRun.pack(side=TOP, padx=5, pady=5, fill=X)
frameSave.pack(side=TOP, padx=5, pady=5, fill=X)
frameWinSize.pack(side
<FILEB>
<CHANGES>
def wait_for_all_jobs(self, minions=('minion','sub_minion',), sleep=.3):
<CHANGEE>
<FILEE>
<FILEB>
import salt.utils.files
with salt.utils.files.fopen(os.path.join(repo_conf_dir, 'spm.repo'), 'w') as fp:
fp.write(textwrap.dedent('''\'''
'''local_repo:'''
'''url: file://{0}'''.format(self.config['spm_build_dir'])))
u_repo = self.run_spm('update_repo', self.config)
def _spm_client(self, config):
import salt.spm
self.ui = SPMTestUserInterface()
client = salt.spm.SPMClient(self.ui, config)
return client
def run_spm(self, cmd, config, arg=None):
client = self._spm_client(config)
spm_cmd = client.run([cmd, arg])
client._close()
return self.ui._status
class ModuleCase(TestCase, SaltClientTestCaseMixin):
'''Execute a module function'''
<CHANGES>
def wait_for_all_jobs(self, minions=['minion', 'sub_minion'], sleep=.3):
<CHANGEE>
'''Wait for all jobs currently running on the list of minions to finish'''
for minion in minions:
while True:
ret = self.run_function('saltutil.running', minion_tgt=minion, timeout=300)
if ret:
log.debug('Waiting for minion\'s jobs: %s', minion)
time.sleep(sleep)
else:
break
def minion_run(self, _function, *args, **kw):
'''Run a single salt function on the 'minion' target and condition'''
'''the return down to match the behavior of the raw function call'''
<FILEE>
<SCANS> raw else '',
self.get_config_dir(),
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'key_test'),
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'roster'),
arg_str
)
return self.run_script('salt-ssh', arg_str, with_retcode=with_retcode, catch_stderr=catch_stderr, raw=True)
def run_run(self,
arg_str,
with_retcode=False,
catch_stderr=False,
asynchronous=False,
timeout=60,
config_dir=None,
**kwargs):
'''Execute salt-run'''
asynchronous = kwargs.get('async', asynchronous)
arg_str = '-c {0}{async_flag} -t {timeout} {1}'.format(
config_dir or self.get_config_dir(),
arg_str,
timeout=timeout,
async_flag=' --async' if asynchronous else '')
return self.run_script('salt-run',
arg_str,
with_retcode=with_retcode,
catch_stderr=catch_stderr,
timeout=timeout)
def run_run_plus(self, fun, *arg, **kwargs):
'''Execute the runner function and return the return data and output in a dict'''
ret = {'fun': fun}
# Late import
import salt.config
import salt.output
import salt.runner
from salt.ext.six.moves import cStringIO
opts = salt.config.master_config(
self.get_config_file_path('master')
)
opts_arg = list(arg)
if kwargs:
opts_arg.append({'__kwarg__': True})
opts_arg[-1].update(kwargs)
opts.update({'doc': False, 'fun': fun, 'arg': opts_arg})
with RedirectStdStreams():
runner = salt.runner.Runner(opts)
ret['return'] = runner.run()
try:
ret['jid'] = runner.jid
except AttributeError:
ret['jid'] = None
# Compile output
# TODO: Support outputters other than nested
opts['color'] = False
opts['output_file'] = cStringIO()
try:
salt.output.display_output(ret['return'], opts=opts)
ret['out'] = opts['output_file'].getvalue()
finally:
opts['output_file'].close()
return ret
def run_key(self, arg_str, catch_stderr=False, with_retcode=False):
'''Execute salt-key'''
arg_str = '-c {0} {1}'.format(self.get_config_dir
<FILEB>
<CHANGES>
if Backend.getIdentifiedDbms() in (DBMS.MYSQL, DBMS.HSQL):
<CHANGEE>
<FILEE>
<FILEB>
"""PostgreSQL input: SELECT usename, passwd FROM pg_shadow"""
"""PostgreSQL output: 'HsYIBS'||COALESCE(CAST(usename AS CHARACTER(10000)), ' ')||'KTBfZp'||COALESCE(CAST(passwd AS CHARACTER(10000)), ' ')||'LkhmuP' FROM pg_shadow"""
"""Oracle input: SELECT COLUMN_NAME, DATA_TYPE FROM SYS.ALL_TAB_COLUMNS WHERE TABLE_NAME='USERS'"""
"""Oracle output: 'GdBRAo'||NVL(CAST(COLUMN_NAME AS VARCHAR(4000)), ' ')||'czEHOf'||NVL(CAST(DATA_TYPE AS VARCHAR(4000)), ' ')||'JVlYgS' FROM SYS.ALL_TAB_COLUMNS WHERE TABLE_NAME='USERS'"""
"""Microsoft SQL Server input: SELECT name, master.dbo.fn_varbintohexstr(password) FROM master..sysxlogins"""
"""Microsoft SQL Server output: 'QQMQJO'+ISNULL(CAST(name AS VARCHAR(8000)), ' ')+'kAtlqH'+ISNULL(CAST(master.dbo.fn_varbintohexstr(password) AS VARCHAR(8000)), ' ')+'lpEqoi' FROM master..sysxlogins"""
"""@param query: query string to be processed"""
"""@type query: C{str}"""
"""@return: query string nulled, casted and concatenated"""
"""@rtype: C{str}"""
if unpack:
concatenatedQuery = ""
query = query.replace(", ", ',')
fieldsSelectFrom, fieldsSelect, fieldsNoSelect, fieldsSelectTop, fieldsSelectCase, _, fieldsToCastStr, fieldsExists = self.getFields(query)
castedFields = self.nullCastConcatFields(fieldsToCastStr)
concatenatedQuery = query.replace(fieldsToCastStr, castedFields, 1)
else:
return query
<CHANGES>
if Backend.isDbms(DBMS.MYSQL):
<CHANGEE>
if fieldsExists:
concatenatedQuery = concatenatedQuery.replace("SELECT ", "CONCAT('%s'," % kb.chars.start, 1)
concatenatedQuery += ",'%s')" % kb<SCANS> SELECT COLUMN_NAME, DATA_TYPE FROM SYS.ALL_TAB_COLUMNS WHERE TABLE_NAME='%s'"""
"""Microsoft SQL Server input: name,master.dbo.fn_varbintohexstr(password)"""
"""Microsoft SQL Server output: ISNULL(CAST(name AS VARCHAR(8000)), ' ')+'nTBdow'+ISNULL(CAST(master.dbo.fn_varbintohexstr(password) AS VARCHAR(8000)), ' ')"""
"""Microsoft SQL Server scope: SELECT name, master.dbo.fn_varbintohexstr(password) FROM master..sysxlogins"""
"""@param fields: fields string to be processed"""
"""@type fields: C{str}"""
"""@return: fields string nulled, casted and concatened"""
"""@rtype: C{str}"""
if not Backend.getDbms():
return fields
if fields.startswith("(CASE") or fields.startswith("(IIF") or fields.startswith("SUBSTR") or fields.startswith("MID(") or re.search(r"\A'[^']+'\Z", fields):
nulledCastedConcatFields = fields
else:
fieldsSplitted = splitFields(fields)
dbmsDelimiter = queries[Backend.getIdentifiedDbms()].delimiter.query
nulledCastedFields = []
for field in fieldsSplitted:
nulledCastedFields.append(self.nullAndCastField(field))
delimiterStr = "%s'%s'%s" % (dbmsDelimiter, kb.chars.delimiter, dbmsDelimiter)
nulledCastedConcatFields = delimiterStr.join(field for field in nulledCastedFields)
return nulledCastedConcatFields
def getFields(self, query):
"""Take in input a query string and return its fields (columns) and"""
"""more details."""
"""Example:"""
"""Input: SELECT user, password FROM mysql.user"""
"""Output: user,password"""
"""@param query: query to be processed"""
"""@type query: C{str}"""
"""@return: query fields (columns) and more details"""
"""@rtype: C{str}"""
prefixRegex = r"(?:\s+(?:FIRST|SKIP)\
<FILEB>
<CHANGES>
clone._select = [SQL('1')]
<CHANGEE>
<FILEE>
<FILEB>
return query
def aggregate(self, aggregation=None, convert=True):
return self._aggregate(aggregation).scalar(convert=convert)
def count(self):
if self._distinct or self._group_by:
return self.wrapped_count()
# defaults to a count() of the primary key
return self.aggregate(convert=False) or 0
def wrapped_count(self, clear_limit=True):
clone = self.order_by()
if clear_limit:
clone._limit = clone._offset = None
sql, params = clone.sql()
wrapped = 'SELECT COUNT(1) FROM (%s) AS wrapped_select' % sql
rq = self.model_class.raw(wrapped, *params)
return rq.scalar() or 0
def exists(self):
clone = self.paginate(1, 1)
<CHANGES>
clone._select = [self.model_class._meta.primary_key]
<CHANGEE>
return bool(clone.scalar())
def get(self):
clone = self.paginate(1, 1)
try:
return clone.execute().next()
except StopIteration:
raise self.model_class.DoesNotExist(
'Instance matching query does not exist:\nSQL: %s\nPARAMS: %s'
% self.sql())
def first(self):
res = self.execute()
res.fill_cache(1)
<FILEE>
<SCANS> return Entity(self.db_column)
def __ddl_column__(self, column_type):
"""Return the column type, e.g. VARCHAR(255) or REAL."""
modifiers = self.get_modifiers()
if modifiers:
return SQL(
'%s(%s)' % (column_type, ', '.join(map(str, modifiers))))
return SQL(column_type)
def __ddl__(self, column_type):
"""Return a list of Node instances that defines the column."""
ddl = [self._as_entity(), self.__ddl_column__(column_type)]
if not self.null:
ddl.append(SQL('NOT NULL'))
if self.primary_key:
ddl.append(SQL('PRIMARY KEY'))
if self.sequence:
ddl.append(SQL("DEFAULT NEXTVAL('%s')" % self.sequence))
if self.constraints:
ddl.extend(self.constraints)
return ddl
def __hash__(self):
return hash(self.name + '.' + self.model_class.__name__)
class BareField(Field):
db_field = 'bare'
class IntegerField(Field):
db_field = 'int'
coerce = int
class BigIntegerField(IntegerField):
db_field = 'bigint'
class PrimaryKeyField(IntegerField):
db_field = 'primary_key'
def __init__(self, *args, **kwargs):
kwargs['primary_key'] = True
super(PrimaryKeyField, self).__init__(*args, **kwargs)
class FloatField(Field):
db_field = 'float'
coerce = float
class DoubleField(FloatField):
db_field = 'double'
class DecimalField(Field):
db_field = 'decimal'
def __init__(self, max_digits=10, decimal_places=5, auto_round=False,
rounding=None, *args, **kwargs):
self.max_digits = max_digits
self.decimal_places = decimal_places
self.auto_round = auto_round
self.rounding = rounding or decimal.DefaultContext.rounding
super(DecimalField, self).__init__(*args, **kwargs)
def clone_base(self, **kwargs):
return super(DecimalField, self).clone_base(
max_digits=self.max_digits,
decimal_places=self.decimal_places,
auto_round=self.auto_round,
rounding=self.rounding,
**kwargs)
def get_modifiers(self):
return [self.max_digits, self.decimal_places]
def db_value(self, value):
D = decimal.Decimal
if not value:
return value if value is None else D(0)
if self.auto_round:
exp = D(10) ** (-self.decimal_places)
rounding = self.rounding
return D(str(value)).quantize(exp, rounding=rounding)
return value
def python_value(self, value):
if value is not None:
if isinstance(value, decimal.Decimal):
return value
return decimal
<FILEB>
<CHANGES>
if share0s: self.send_share0s(hashes=share0s)
<CHANGEE>
<FILEE>
<FILEB>
self.node.handle_share(share, self)
def send_shares(self, shares, full=False):
share1bs = []
share0s = []
share1as = []
# XXX doesn't need to send full block when it's not urgent
# eg. when getting history
for share in shares:
if share.hash <= share.header['target']:
share1bs.append(share.as_share1b())
else:
if self.mode == 0 and not full:
share0s.append(share.hash)
elif self.mode == 1 or full:
share1as.append(share.as_share1a())
else:
raise ValueError(self.mode)
if share1bs: self.send_share1bs(share1bs=share1bs)
<CHANGES>
if share0s: self.send_share0s(share0s=share0s)
<CHANGEE>
if share1as: self.send_share1as(share1as=share1as)
def connectionLost(self, reason):
if self.node_var_watch is not None:
self.node.mode_var.changed.unwatch(self.node_var_watch)
if self.connected2:
self.node.lost_conn(self)
class ServerFactory(protocol.ServerFactory):
def __init__(self, node):
self.node = node
def buildProtocol(self, addr):
p = Protocol(self.node)
p.factory = self
<FILEE>
<SCANS>
message_version = bitcoin_data.ComposedType([
('version', bitcoin_data.StructType('<I')),
('services', bitcoin_data.StructType('<Q')),
('addr_to', bitcoin_data.address_type),
('addr_from', bitcoin_data.address_type),
('nonce', bitcoin_data.StructType('<Q')),
('sub_version', bitcoin_data.VarStrType()),
('mode', bitcoin_data.StructType('<I')),
('best_share_hash', bitcoin_data.PossiblyNone(0, bitcoin_data.HashType())),
])
def handle_version(self, version, services, addr_to, addr_from, nonce, sub_version, mode, best_share_hash):
self.other_version = version
self.other_services = services
self.other_mode_var = variable.Variable(mode)
if nonce == self.node.nonce:
#print 'Detected connection to self, disconnecting from %s:%i' % (self.transport.getPeer().host, self.transport.getPeer().port)
self.transport.loseConnection()
return
if nonce in self.node.peers:
#print 'Detected duplicate connection, disconnecting from %s:%i' % (self.transport.getPeer().host, self.transport.getPeer().port)
self.transport.loseConnection()
return
self.nonce = nonce
self.connected2 = True
self.node.got_conn(self)
self._think()
self._think2()
if best_share_hash is not None:
self.handle_share0s(hashes=[best_share_hash])
message_update_mode = bitcoin_data.ComposedType([
('mode', bitcoin_data.StructType('<I')),
])
def handle_set_mode(self, mode):
self.other_mode_var.set(mode)
message_ping = bitcoin_data.ComposedType([])
def handle_ping(self):
pass
message_addrme = bitcoin_data.ComposedType([
('port', bitcoin_data.StructType('<H')),
])
def handle_addrme(self, port):
host = self.transport.getPeer().host
#print 'addrme from', host, port
if host == '127.0.0.1':
if random.random() < .8 and self.node.peers:
random.choice(self.node.peers.values()).send_addrme(port=port) # services...
else:
self.node.got_addr((self.transport.getPeer().host, port), self.other_services, int(time.time()))
if random.random() < .8 and self.node.peers:
random.choice(self.node.peers.values()).send_addrs(addrs=[
dict(
address=dict(
services=self.other_services,
address=host,
port=port,
),
<FILEB>
<CHANGES>
parameters['additional_owners'] = ','.join(map(str,additional_owners))
<CHANGEE>
<FILEE>
<FILEB>
"""File-like object to upload."""
"""additional_owners: additional Twitter users that are allowed to use"""
"""The uploaded media. Should be a list of integers. Maximum"""
"""number of additional owners is capped at 100 by Twitter."""
"""media_category:"""
"""Category with which to identify media upload. Only use with Ads"""
"""API & video files."""
"""Returns:"""
"""tuple: media_id (returned from Twitter), file-handler object (i.e., has .read()"""
"""method), filename media file."""
url = '%s/media/upload.json' % self.upload_url
media_fp, filename, file_size, media_type = parse_media_file(media, async_upload=True)
if not all([media_fp, filename, file_size, media_type]):
raise TwitterError({'message': 'Could not process media file'})
parameters = {}
if additional_owners and len(additional_owners) > 100:
raise TwitterError({'message': 'Maximum of 100 additional owners may be specified for a Media object'})
if additional_owners:
<CHANGES>
parameters['additional_owners'] = additional_owners
<CHANGEE>
if media_category:
parameters['media_category'] = media_category
# INIT doesn't read in any data. Its purpose is to prepare Twitter to
# receive the content in APPEND requests.
parameters['command'] = 'INIT'
parameters['media_type'] = media_type
parameters['total_bytes'] = file_size
resp = self._RequestUrl(url, 'POST', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
try:
media_id = data['media_id']
except KeyError:
<FILEE>
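The change above serializes additional_owners before the INIT request: the fixed line sends a single comma-separated string of user ids rather than a raw Python list. A tiny worked example of that serialization (the id values are made up):

additional_owners = [12345, 67890]   # hypothetical user ids
assert ','.join(map(str, additional_owners)) == '12345,67890'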
<SCANS> to a list for the authenticated user."""
"""Args:"""
"""list_id (int, optional):"""
"""The numerical id of the list."""
"""slug (str, optional):"""
"""You can identify a list by its slug instead of its numerical id."""
"""If you decide to do so, note that you'll also have to specify"""
"""the list owner using the owner_id or owner_screen_name parameters."""
"""owner_screen_name (str, optional):"""
"""The screen_name of the user who owns the list being requested by a"""
"""slug."""
"""owner_id (int, optional):"""
"""The user ID of the user who owns the list being requested by a slug."""
"""user_id (int, optional):"""
"""The user_id or a list of user_id's to remove from the list."""
"""If not given, then screen_name is required."""
"""screen_name (str, optional):"""
"""The screen_name or a list of Screen_name's to remove from the list."""
"""If not given, then user_id is required."""
"""Returns:"""
"""twitter.list.List: A twitter.List instance representing the"""
"""removed list."""
is_list = False
parameters = {}
parameters.update(self._IDList(list_id=list_id,
slug=slug,
owner_id=owner_id,
owner_screen_name=owner_screen_name))
if user_id:
if isinstance(user_id, list) or isinstance(user_id, tuple):
is_list = True
uids = [str(enf_type('user_id', int, uid)) for uid in user_id]
parameters['user_id'] = ','.join(uids)
else:
parameters['user_id'] = int(user_id)
elif screen_name:
if isinstance(screen_name, list) or isinstance(screen_name, tuple):
is_list = True
parameters['screen_name'] = ','.join(screen_name)
else:
parameters['screen_name'] = screen_name
if is_list:
url = '%s/lists/members/destroy_all.json' % self.base_url
else:
url = '%s/lists/members/destroy.json' % self.base_url
resp = self._RequestUrl(url, 'POST', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
return List.NewFromJsonDict(data)
def GetListsPaged(self,
user_id=None,
screen_name=None,
cursor=-1,
count=20):
"""Fetch the sequence
<FILEB>
<CHANGES>
mail.cc.append(webnotes.conn.get_value("Profile", webnotes.session.user, "email"))
<CHANGEE>
<FILEE>
<FILEB>
'value': result[0]['customer'] or result[0]['supplier']
}
return {}
def send_comm_email(d, name, sent_via=None, print_html=None, attachments='[]', send_me_a_copy=False):
from json import loads
footer = None
if sent_via:
if hasattr(sent_via, "get_sender"):
d.sender = sent_via.get_sender(d) or d.sender
if hasattr(sent_via, "get_subject"):
d.subject = sent_via.get_subject(d)
if hasattr(sent_via, "get_content"):
d.content = sent_via.get_content(d)
footer = set_portal_link(sent_via, d)
from webnotes.utils.email_lib.smtp import get_email
mail = get_email(d.recipients, sender=d.sender, subject=d.subject,
msg=d.content, footer=footer)
if send_me_a_copy:
<CHANGES>
mail.cc.append(d.sender)
<CHANGEE>
if print_html:
mail.add_attachment(name.replace(' ','').replace('/','-') + '.html', print_html)
for a in loads(attachments):
try:
mail.attach_file(a)
except IOError, e:
webnotes.msgprint("""Unable to find attachment %s. Please resend without attaching this file.""" % a,
raise_exception=True)
mail.send()
def set_portal_link(sent_via, comm):
"""set portal link in footer"""
from webnotes.webutils import is_signup_enabled
<FILEE>
<SCANS># Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import webnotes
class DocType():
def __init__(self, doc, doclist=[]):
self.doc = doc
self.doclist = doclist
def get_parent_bean(self):
return webnotes.bean(self.doc.parenttype, self.doc.parent)
def update_parent(self):
"""update status of parent Lead or Contact based on who is replying"""
observer = self.get_parent_bean().get_method("on_communication")
if observer:
observer()
def on_update(self):
self.update_parent()
@webnotes.whitelist()
def make(doctype=None, name=None, content=None, subject=None, sent_or_received = "Sent",
sender=None, recipients=None, communication_medium="Email", send_email=False,
print_html=None, attachments='[]', send_me_a_copy=False, set_lead=True, date=None):
# add to Communication
sent_via = None
# since we are using fullname and email,
# if the fullname has any incompatible characters, formataddr can deal with it
try:
import json
sender = json.loads(sender)
except ValueError:
pass
if isinstance(sender, (tuple, list)) and len(sender)==2:
from email.utils import formataddr
sender = formataddr(sender)
comm = webnotes.new_bean('Communication')
d = comm.doc
d.subject = subject
d.content = content
d.sent_or_received = sent_or_received
d.sender = sender or webnotes.conn.get_value("Profile", webnotes.session.user, "email")
d.recipients = recipients
# add as child
sent_via = webnotes.get_obj(doctype, name)
d.parent = name
d.parenttype = doctype
d.parentfield = "communications"
if date:
d.communication_date = date
d.communication_medium = communication_medium
comm.ignore_permissions = True
comm.insert()
if send_email:
d = comm.doc
send_comm_email(d, name, sent_via, print_html, attachments, send_me_a_copy)
@webnotes.whitelist()
def
<FILEB>
<CHANGES>
raise errors.InconsistentDatabaseError("Bad number of files (%d) " \
<CHANGEE>
<FILEE>
<FILEB>
# Use the existing DB connection, or open a new one if None was provided
db = existdb or database.Database()
db.connect()
select = db.select([db.parfiles.c.filename, \
db.parfiles.c.filepath, \
db.parfiles.c.md5sum]).\
where(db.parfiles.c.parfile_id==parfile_id)
result = db.execute(select)
rows = result.fetchall()
result.close()
if not existdb:
# Close the DB connection we opened
db.close()
if len(rows) == 1:
filename = rows[0]['filename']
filepath = rows[0]['filepath']
md5sum_DB = rows[0]['md5sum']
else:
<CHANGES>
raise errors.IncosistentDatabaseError("Bad number of files (%d) " \
<CHANGEE>
"with parfile_id=%d" % (len(rows), parfile_id))
fullpath = os.path.join(filepath,filename)
# Make sure the file exists
Verify_file_path(fullpath)
if verify_md5:
print_info("Confirming MD5 sum of %s matches what is " \
"stored in DB (%s)" % (fullpath, md5sum_DB), 2)
md5sum_file = Get_md5sum(fullpath)
if md5sum_DB != md5sum_file:
raise errors.FileError("md5sum check of %s failed! MD5 from " \
"DB (%s) != MD5 from file (%s)" % \
(fullpath, md5sum_DB, md5sum_file))
<FILEE>
<SCANS> path, or None if no master"""
"""template exists for the provided pulsar/obssystem"""
"""combination."""
db = database.Database()
db.connect()
select = db.select([db.templates.c.template_id, \
db.templates.c.filename, \
db.templates.c.filepath]).\
where((db.master_templates.c.template_id == \
db.templates.c.template_id) & \
(db.master_templates.c.pulsar_id == pulsar_id) & \
(db.master_templates.c.obssystem_id == obssystem_id))
result = db.execute(select)
rows = result.fetchall()
result.close()
db.close()
if len(rows) > 1:
raise errors.InconsistentDatabaseError("There are too many (%d) " \
"master templates for pulsar #%d" % \
(len(rows), pulsar_id ))
elif len(rows) == 0:
return None, None
else:
mastertmp_id = rows[0]['template_id']
path = rows[0]['filepath']
fn = rows[0]['filename']
if path is None or fn is None:
return None, None
else:
return mastertmp_id, os.path.join(path, fn)
def create_rawfile_diagnostic_plots(archivefn, outdir, suffix=""):
"""Given an archive create diagnostic plots to be uploaded"""
"""to the DB."""
"""Inputs:"""
"""archivefn: The archive's name."""
"""outdir: The directory where the plots should be created."""
"""suffix: A string to add to the end of the base output"""
"""file name. (Default: Don't add a suffix)."""
"""NOTE: No dot, underscore, etc is added between the base"""
"""file name and the suffix."""
"""Outputs:"""
"""diagfns: A dictionary of diagnostic files created."""
"""The keys are the plot type descriptions, and """
"""the values are the full path to the plots."""
archivefn = os.path.abspath(archivefn)
hdr = get_header_vals(archivefn, ['name', 'intmjd', 'fracmjd', \
'nsub', 'nchan', 'npol'])
hdr['secs'] = int(hdr['fracmjd']*24*3600+0.5) # Add 0.5 so result is
# rounded to nearest int
basefn = os.path.split(archivefn)[-1]
# Add the suffix to the end of the base file name
basefn += suffix
# To keep track of all diagnostics created, keyed by their description
diagfns = {}
if hdr['nsub
<FILEB>
<CHANGES>
img = check_niimg(img)
<CHANGEE>
<FILEE>
<FILEB>
"""without touching non-zero entries. Will leave one voxel of"""
"""zero padding around the obtained non-zero area in order to"""
"""avoid sampling issues later on."""
"""Parameters"""
"""=========="""
"""img: Niimg-like object"""
"""See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg."""
"""img to be cropped."""
"""rtol: float"""
"""relative tolerance (with respect to maximal absolute"""
"""value of the image), under which values are considered"""
"""negligeable and thus croppable."""
"""copy: boolean"""
"""Specifies whether cropped data is copied or not."""
"""Returns"""
"""======="""
"""cropped_img: image"""
"""Cropped version of the input image"""
<CHANGES>
img = check_niimg_3d(img)
<CHANGEE>
data = img.get_data()
infinity_norm = max(-data.min(), data.max())
passes_threshold = np.logical_or(data < -rtol * infinity_norm,
data > rtol * infinity_norm)
if data.ndim == 4:
passes_threshold = np.any(passes_threshold, axis=-1)
coords = np.array(np.where(passes_threshold))
start = coords.min(axis=1)
end = coords.max(axis=1) + 1
# pad with one voxel to avoid resampling problems
start = np.maximum(start - 1, 0)
end = np.minimum(end + 1, data.shape[:3])
<FILEE>
<SCANS>"""Preprocessing functions for images."""
"""See also nilearn.signal."""
# Authors: Philippe Gervais, Alexandre Abraham
# License: simplified BSD
import collections
import numpy as np
from scipy import ndimage
from sklearn.externals.joblib import Parallel, delayed
from .. import signal
from .resampling import reorder_img
from .._utils import (check_niimg_4d, check_niimg_3d, check_niimg, as_ndarray,
_repr_niimgs)
from .._utils.niimg_conversions import _index_img
from .._utils.niimg import new_img_like, _safe_get_data
from .._utils.compat import _basestring
from .. import masking
def high_variance_confounds(imgs, n_confounds=5, percentile=2.,
detrend=True, mask_img=None):
"""Return confounds signals extracted from input signals with highest"""
"""variance."""
"""Parameters"""
"""=========="""
"""imgs: Niimg-like object"""
"""See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg."""
"""4D image."""
"""mask_img: Niimg-like object"""
"""See http://nilearn.github.io/building_blocks/manipulating_mr_images.html#niimg."""
"""If provided, confounds are extracted from voxels inside the mask."""
"""If not provided, all voxels are used."""
"""n_confounds: int"""
"""Number of confounds to return"""
"""percentile: float"""
"""Highest-variance signals percentile to keep before computing the"""
"""singular value decomposition, 0. <= `percentile` <= 100."""
"""mask_img.sum() * percentile / 100. must be greater than n_confounds."""
"""detrend: bool"""
"""If True, detrend signals before processing."""
"""Returns"""
"""======="""
"""v: numpy.ndarray"""
"""highest variance confounds. Shape: (number of scans, n_confounds)"""
"""Notes"""
"""======"""
"""This method is related to what has been published in the literature"""
"""as 'CompCor' (Behzadi NeuroImage 2007)."""
"""The implemented algorithm does the following:"""
"""- compute sum of squares for each signals (no mean removal)"""
"""- keep a given percentile of signals with highest variance (percentile)"""
"""- compute an svd of the extracted signals"""
"""- return a given number (n_confounds) of signals from the svd with"""
"""highest singular values."""
"""See also"""
"""========"""
"""nilearn.signal.high_variance_confound
<FILEB>
<CHANGES>
self.assertContains(response, b"Currently")
<CHANGEE>
<FILEE>
<FILEB>
self.client.logout()
def test_inline_file_upload_edit_validation_error_post(self):
"""Test that inline file uploads correctly display prior data (#10002)."""
post_data = {
"name": "Test Gallery",
"pictures-TOTAL_FORMS": "2",
"pictures-INITIAL_FORMS": "1",
"pictures-MAX_NUM_FORMS": "0",
"pictures-0-id": six.text_type(self.picture.id),
"pictures-0-gallery": six.text_type(self.gallery.id),
"pictures-0-name": "Test Picture",
"pictures-0-image": "",
"pictures-1-id": "",
"pictures-1-gallery": str(self.gallery.id),
"pictures-1-name": "Test Picture 2",
"pictures-1-image": "",
}
response = self.client.post('/test_admin/%s/admin_views/gallery/%d/' % (self.urlbit, self.gallery.id), post_data)
<CHANGES>
self.assertTrue(response._container[0].find("Currently:") > -1)
<CHANGEE>
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class AdminInlineTests(TestCase):
urls = "admin_views.urls"
fixtures = ['admin-views-users.xml']
def setUp(self):
self.post_data = {
"name": "Test Name",
"widget_set-TOTAL_FORMS": "3",
"widget_set-INITIAL_FORMS": "0",
"widget_set-MAX_NUM_FORMS": "0",
"widget_set-0-id": "",
"widget_set-0-owner": "1",
<FILEE>
<SCANS>er',))
class TestInlineNotEditable(TestCase):
urls = "admin_views.urls"
fixtures = ['admin-views-users.xml']
def setUp(self):
result = self.client.login(username='super', password='secret')
self.assertEqual(result, True)
def tearDown(self):
self.client.logout()
def test(self):
"""InlineModelAdmin broken?"""
response = self.client.get('/test_admin/admin/admin_views/parent/add/')
self.assertEqual(response.status_code, 200)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class AdminCustomQuerysetTest(TestCase):
urls = "admin_views.urls"
fixtures = ['admin-views-users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
self.pks = [EmptyModel.objects.create().id for i in range(3)]
self.super_login = {
REDIRECT_FIELD_NAME: '/test_admin/admin/',
LOGIN_FORM_KEY: 1,
'username': 'super',
'password': 'secret',
}
def test_changelist_view(self):
response = self.client.get('/test_admin/admin/admin_views/emptymodel/')
for i in self.pks:
if i > 1:
self.assertContains(response, 'Primary key = %s' % i)
else:
self.assertNotContains(response, 'Primary key = %s' % i)
def test_changelist_view_count_queries(self):
# create 2 Person objects
Person.objects.create(name='person1', gender=1)
Person.objects.create(name='person2', gender=2)
# 4 queries are expected: 1 for the session, 1 for the user,
# 1 for the count and 1 for the objects on the page
with self.assertNumQueries(4):
resp = self.client.get('/test_admin/admin/admin_views/person/')
self.assertEqual(resp.context['selection_note'], '0 of 2 selected')
self.assertEqual(resp.context['selection_note_all'], 'All 2 selected')
with self.assertNumQueries(4):
extra = {'q': 'not_in_name'}
resp = self.client.get('/test_admin/admin/admin_views/person/', extra)
self.assertEqual(resp.context['selection_note'], '0 of
<FILEB>
<CHANGES>
if field not in [f.fieldname for f in meta.fields] and field not in (default_fields + optional_fields):
<CHANGEE>
<FILEE>
<FILEB>
# draft docs always on top
if meta.is_submittable:
args.order_by = "`tab{0}`.docstatus asc, {1}".format(self.doctype, args.order_by)
def validate_order_by_and_group_by_params(self, parameters, meta):
"""Clause cases:"""
"""1. check for . to split table and columns and check for `tab prefix"""
"""2. elif check field in meta"""
if not parameters:
return
for field in parameters.split(","):
if "." in field and field.strip().startswith("`tab"):
tbl = field.strip().split('.')[0]
if tbl not in self.tables:
if tbl.startswith('`'):
tbl = tbl[4:-1]
frappe.throw(_("Please select atleast 1 column from {0} to sort/group").format(tbl))
else:
field = field.strip().split(' ')[0]
<CHANGES>
if field not in [f.fieldname for f in meta.fields] and field not in default_fields:
<CHANGEE>
frappe.throw(_("Invalid field used to sort/group: {0}").format(field))
def add_limit(self):
if self.limit_page_length:
return 'limit %s, %s' % (self.limit_start, self.limit_page_length)
else:
return ''
def add_comment_count(self, result):
for r in result:
if not r.name:
continue
r._comment_count = 0
if "_comments" in r:
<FILEE>
<SCANS>0]
if table_name.lower().startswith('group_concat('):
table_name = table_name[13:]
if table_name.lower().startswith('ifnull('):
table_name = table_name[7:]
if not table_name[0]=='`':
table_name = '`' + table_name + '`'
if not table_name in self.tables:
self.append_table(table_name)
def append_table(self, table_name):
self.tables.append(table_name)
doctype = table_name[4:-1]
if (not self.flags.ignore_permissions) and (not frappe.has_permission(doctype)):
raise frappe.PermissionError, doctype
def set_field_tables(self):
'''If there is more than one table, the fieldname must not be ambiguous.'''
'''If the fieldname is not explicitly mentioned, set the default table'''
if len(self.tables) > 1:
for i, f in enumerate(self.fields):
if '.' not in f:
self.fields[i] = '{0}.{1}'.format(self.tables[0], f)
def set_optional_columns(self):
"""Removes optional columns like `_user_tags`, `_comments` etc. if not in table"""
columns = frappe.db.get_table_columns(self.doctype)
# remove from fields
to_remove = []
for fld in self.fields:
for f in optional_fields:
if f in fld and not f in columns:
to_remove.append(fld)
for fld in to_remove:
del self.fields[self.fields.index(fld)]
# remove from filters
to_remove = []
for each in self.filters:
if isinstance(each, basestring):
each = [each]
for element in each:
if element in optional_fields and element not in columns:
to_remove.append(each)
for each in to_remove:
if isinstance(self.filters, dict):
del self.filters[each]
else:
self.filters.remove(each)
def build_conditions(self):
self.conditions = []
self.grouped_or_conditions = []
self.build_filter_conditions(self.filters, self.conditions)
self.build_filter_conditions(self.or_filters, self.grouped_or_conditions)
# match conditions
if not self.flags.ignore_permissions:
match_conditions = self.build_match_conditions()
<FILEB>
<CHANGES>
for arg in items:
<CHANGEE>
<FILEE>
<FILEB>
'''Return a list of data about server peers.'''
coin = self.env.coin
db = self.db
lines = []
def arg_to_hashX(arg):
try:
script = bytes.fromhex(arg)
lines.append(f'Script: {arg}')
return coin.hashX_from_script(script)
except ValueError:
pass
try:
hashX = coin.address_to_hashX(arg)
except Base58Error as e:
lines.append(e.args[0])
return None
lines.append(f'Address: {arg}')
return hashX
<CHANGES>
for arg in args:
<CHANGEE>
hashX = arg_to_hashX(arg)
if not hashX:
continue
n = None
history = await db.limited_history(hashX, limit=limit)
for n, (tx_hash, height) in enumerate(history):
lines.append(f'History #{n:,d}: height {height:,d} '
f'tx_hash {hash_to_hex_str(tx_hash)}')
if n is None:
lines.append('No history found')
n = None
utxos = await db.all_utxos(hashX)
<FILEE>
<SCANS>.'''
position = -1
for pos, mn in enumerate(payment_queue, start=1):
if mn[2] == address:
position = pos
break
return position
# Accordingly with the masternode payment queue, a custom list
# with the masternode information including the payment
# position is returned.
cache = self.session_mgr.mn_cache
if not cache or self.session_mgr.mn_cache_height != self.db.db_height:
full_mn_list = await self.daemon_request('masternode_list',
['full'])
mn_payment_queue = get_masternode_payment_queue(full_mn_list)
mn_payment_count = len(mn_payment_queue)
mn_list = []
for key, value in full_mn_list.items():
mn_data = value.split()
mn_info = {}
mn_info['vin'] = key
mn_info['status'] = mn_data[0]
mn_info['protocol'] = mn_data[1]
mn_info['payee'] = mn_data[2]
mn_info['lastseen'] = mn_data[3]
mn_info['activeseconds'] = mn_data[4]
mn_info['lastpaidtime'] = mn_data[5]
mn_info['lastpaidblock'] = mn_data[6]
mn_info['ip'] = mn_data[7]
mn_info['paymentposition'] = get_payment_position(
mn_payment_queue, mn_info['payee'])
mn_info['inselection'] = (
mn_info['paymentposition'] < mn_payment_count // 10)
balance = await self.address_get_balance(mn_info['payee'])
mn_info['balance'] = (sum(balance.values())
/ self.coin.VALUE_PER_COIN)
mn_list.append(mn_info)
cache.clear()
cache.extend(mn_list)
self.session_mgr.mn_cache_height = self.db.db_height
# If payees is an empty list the whole masternode list is returned
if payees:
return [mn for mn in cache if mn['payee'] in payees]
else:
return cache
<FILEB>
<CHANGES>
return [node for _, node in self._make_nodes()]
<CHANGEE>
<FILEE>
<FILEB>
if not isdefined(values):
values = []
if node.result.outputs:
values.insert(i, node.result.outputs.get()[key])
else:
values.insert(i, None)
if any([val != Undefined for val in values]) and self._result.outputs:
setattr(self._result.outputs, key, values)
if returncode and any([code is not None for code in returncode]):
msg = []
for i, code in enumerate(returncode):
if code is not None:
msg += ['Subnode %d failed'%i]
msg += ['Error:', str(code)]
raise Exception('Subnodes of node: %s failed:\n%s'%(self.name,
'\n'.join(msg)))
def get_subnodes(self):
self._get_inputs()
<CHANGES>
return [node for node in self._make_nodes()]
<CHANGEE>
def _run_interface(self, execute=True, updatehash=False):
"""Run the mapnode interface"""
"""This is primarily intended for serial execution of mapnode. A parallel"""
"""execution requires creation of new nodes that can be spawned"""
old_cwd = os.getcwd()
cwd = self.output_dir()
os.chdir(cwd)
if execute:
nitems = len(filename_to_list(getattr(self.inputs, self.iterfield[0])))
nodenames = ['_' + self.name+str(i) for i in range(nitems)]
# map-reduce formulation
self._collate_results(self._node_runner(self._make_nodes(cwd),
<FILEE>
<SCANS> node, param, source, sourceinfo):
"""Set inputs of a node given the edge connection"""
if isinstance(sourceinfo, str):
val = source.get_output(sourceinfo)
elif isinstance(sourceinfo, tuple):
if callable(sourceinfo[1]):
val = sourceinfo[1](source.get_output(sourceinfo[0]),
*sourceinfo[2:])
newval = val
if isinstance(val, TraitDictObject):
newval = dict(val)
if isinstance(val, TraitListObject):
newval = val[:]
logger.debug('setting node input: %s->%s', param, str(newval))
node.set_input(param, deepcopy(newval))
def _create_flat_graph(self):
"""Turn a hierarchical DAG into a simple DAG where no node is a workflow"""
logger.debug('Creating flat graph for workflow: %s', self.name)
workflowcopy = deepcopy(self)
workflowcopy._generate_flatgraph()
return workflowcopy._graph
def _reset_hierarchy(self):
"""Reset the hierarchy on a graph"""
for node in self._graph.nodes():
if isinstance(node, Workflow):
node._reset_hierarchy()
for innernode in node._graph.nodes():
innernode._hierarchy = '.'.join((self.name,innernode._hierarchy))
else:
node._hierarchy = self.name
def _generate_flatgraph(self):
"""Generate a graph containing only Nodes or MapNodes"""
logger.debug('expanding workflow: %s', self)
nodes2remove = []
if not nx.is_directed_acyclic_graph(self._graph):
raise Exception('Workflow: %s is not a directed acyclic graph (DAG)'%self.name)
nodes = nx.topological_sort(self._graph)
for node in nodes:
logger.debug('processing node: %s'%node)
if isinstance(node, Workflow):
nodes2remove.append(node)
# use in_edges instead of in_edges_iter to allow
# disconnections to take place properly. otherwise, the
# edge dict is modified.
for u, _, d in self._graph.in_edges(nbunch=node, data=True):
logger.debug('in: connections-> %s'%str(d['connect']))
for cd in deepcopy(d['connect']):
logger.debug("in: %s" % str (cd))
dstnode = node._get_parameter_node(cd[1],subtype='in')
srcnode = u
srcout = cd[0]
dstin = cd[1].split('.')[-1]
logger.debug('in edges: %s %s %s %s'%(srcnode, srcout, dstnode, dstin))
self.disconnect(u, cd[0], node, cd[1])
self.connect(srcnode
<FILEB>
<CHANGES>
self.owner = discord.utils.find(lambda m: m.id == self.config.owner_id and m.voice_channel, self.get_all_members())
<CHANGEE>
<FILEE>
<FILEB>
except discord.Forbidden:
print("Error: Cannot delete message \"%s\", no permission" % message.clean_content)
except discord.NotFound:
print("Warning: Cannot delete message \"%s\", message not found" % message.clean_content)
async def safe_edit_message(self, message, new, *, send_if_fail=False):
try:
return await self.edit_message(message, new)
except discord.NotFound:
print("Warning: Cannot edit message \"%s\", message not found" % message.clean_content)
if send_if_fail:
print("Sending instead")
return await self.safe_send_message(message.channel, new)
# noinspection PyMethodOverriding
def run(self):
return super().run(self.config.username, self.config.password)
async def on_ready(self):
print('Connected!\n')
print("Bot: %s/%s" % (self.user.id, self.user.name))
<CHANGES>
self.owner = discord.utils.get(self.get_all_members(), id=self.config.owner_id)
<CHANGEE>
if not self.owner:
print("Owner could not be found on any server (id: %s)" % self.config.owner_id)
else:
print("Owner: %s/%s" % (self.owner.id, self.owner.name))
if self.config.owner_id == self.user.id:
print("\n"
"[NOTICE] You have either set the OwnerID config option to the bot's id instead "
"of yours, or you've used your own credentials to log the bot in instead of the "
"bot's account (the bot needs its own account to work properly).")
print()
print("Bound to channels: %s" % self.config.bound_channels) # TODO: Print list of channels
# TODO: Make this prettier and easier to read (in the console)
<FILEE>
<SCANS> There is no "message" var, lets get outta here
if not orig_msg:
return await func(self, *args, **kwargs)
vc = self.voice_clients.get(orig_msg.server.id, None)
# If we've connected to a voice chat and we're in the same voice channel
if not vc or (vc and vc.channel == orig_msg.author.voice_channel):
return await func(self, *args, **kwargs)
else:
return Response("you cannot use this command when not in the voice channel", reply=True, delete_after=20)
return wrapper
# TODO: Add some sort of `denied` argument for a message to send when someone else tries to use it
def owner_only(func):
@wraps(func)
async def wrapper(self, *args, **kwargs):
# Only allow the owner to use these commands
orig_msg = self._get_variable('message')
if not orig_msg or orig_msg.author.id == self.config.owner_id:
return await func(self, *args, **kwargs)
else:
return Response("only the owner can use this command", reply=True, delete_after=20)
return wrapper
def _get_variable(self, name):
stack = inspect.stack()
try:
for frames in stack:
current_locals = frames[0].f_locals
if name in current_locals:
return current_locals[name]
finally:
del stack
# TODO: autosummon option to a specific channel
async def _auto_summon(self, channel=None):
if self.owner:
await self.handle_summon(self.owner.voice_channel, self.owner)
return True
else:
print("Owner not found in a voice channel, could not autosummon.")
return False
def _fixg(self, x, dp=2):
return ('{:.%sf}' % dp).format(x).rstrip('0').rstrip('.')
async def get_voice_client(self, channel):
if isinstance(channel, Object):
channel = self.get_channel(channel.id)
if getattr(channel, 'type', ChannelType.text) != ChannelType.voice:
raise AttributeError('Channel passed must be a voice channel')
with await self.voice_client_connect_lock:
server = channel.server
if server.id in self.voice_clients:
return self.voice_clients[server.id]
payload = {
'op':
<FILEB>
<CHANGES>
command = sys.executable + " -u " + executableName + " " + " ".join( nextArguments )
<CHANGEE>
<FILEE>
<FILEB>
if package == deplist[ -1 ] and ignoreInstalled:
ignore = True
if ( utils.isInstalled( package[0], package[1], package[2] ) and not ignore ):
if utils.verbose() > 1 and package[1] == packageName:
utils.warning( "already installed %s/%s-%s" % ( package[0], package[1], package[2] ) )
elif utils.verbose() > 2 and not package[1] == packageName:
utils.warning( "already installed %s/%s-%s" % ( package[0], package[1], package[2] ) )
else:
if ( doPretend ):
if utils.verbose() > 0:
utils.warning( "pretending %s/%s-%s" % ( package[0], package[1], package[2] ) )
else:
if not handlePackage( package[0], package[1], package[2], buildAction, opts ):
utils.error( "fatal error: package %s/%s-%s %s failed" % \
(package[0], package[1], package[2], buildAction) )
exit( 1 )
print
if len( nextArguments ) > 0:
<CHANGES>
command = sys.executable + " -u emerge.py " + " ".join( nextArguments )
<CHANGEE>
for element in environ.keys():
if environ[ element ]:
os.environ[ element ] = environ[ element ]
elif element == "EMERGE_VERBOSE":
os.environ[ element ] = "1"
else:
os.environ[ element ] = ""
utils.system( command ) or utils.die( "cannot execute next commands cmd: %s" % command )
<FILEE>
<SCANS>
break
nextArguments = sys.argv[ (sys.argv.index( i ) + 1): ]
if stayQuiet == True:
verbose = 0
os.environ["EMERGE_VERBOSE"] = str( verbose )
# get KDEROOT from env
KDEROOT = os.getenv( "KDEROOT" )
utils.debug( "buildAction: %s" % buildAction )
utils.debug( "doPretend: %s" % doPretend )
utils.debug( "packageName: %s" % packageName )
utils.debug( "buildType: %s" % os.getenv( "EMERGE_BUILDTYPE" ) )
utils.debug( "buildTests: %s" % os.getenv( "EMERGE_BUILDTESTS" ) )
utils.debug( "verbose: %s" % os.getenv( "EMERGE_VERBOSE" ) )
utils.debug( "KDEROOT: %s" % KDEROOT )
if not os.getenv( "CMAKE_INCLUDE_PATH" ) == None:
print
utils.warning( "CMAKE_INCLUDE_PATH found as environment variable. you cannot override emerge"\
" with this - unsetting CMAKE_INCLUDE_PATH locally" )
os.environ["CMAKE_INCLUDE_PATH"]=""
if not os.getenv( "CMAKE_LIBRARY_PATH" ) == None:
print
utils.warning( "CMAKE_LIBRARY_PATH found as environment variable. you cannot override emerge"\
" with this - unsetting CMAKE_LIBRARY_PATH locally" )
os.environ["CMAKE_LIBRARY_PATH"]=""
if not os.getenv( "CMAKE_FIND_PREFIX" ) == None:
print
utils.warning( "CMAKE_FIND_PREFIX found as environment variable. you cannot override emerge"\
" with this - unsetting CMAKE_FIND_PREFIX locally" )
os.environ["CMAKE_FIND_PREFIX"]=""
# adding emerge/bin to find base.py and gnuwin32.py etc.
os.environ["PYTHONPATH"] = os.getenv( "PYTHONPATH" ) + ";" +\
os.path.join( os.getcwd(), os.path.dirname( executableName ) )
sys.path.append( os.path.join( os.getcwd(), os.path.dirname( executableName ) ) )
deplist = []
if packageName:
utils.solveDependencies( "", packageName, "", deplist )
utils.debug( "deplist: %s" % deplist, 2 )
deplist.reverse()
success = True
# package[0] -> category
# package[1] -> package
# package[2] -> version
if ( buildAction != "all" ):
"""if a buildAction is given, then do not try to
<FILEB>
<CHANGES>
match = re.search(pattern, responce.decode('utf-8'), re.U | re.I)
<CHANGEE>
<FILEE>
<FILEB>
)
frappe.db.sql(
"""update `tabEmail Account` set uidvalidity=%s, uidnext=%s where"""
"""name=%s""", (current_uid_validity, uidnext, self.settings.email_account)
)
# uid validity not found pulling emails for first time
if not uid_validity:
self.settings.email_sync_rule = "UNSEEN"
return
sync_count = 100 if uid_validity else int(self.settings.initial_sync_count)
from_uid = 1 if uidnext < (sync_count + 1) or (uidnext - sync_count) < 1 else uidnext - sync_count
# sync last 100 emails
self.settings.email_sync_rule = "UID {}:{}".format(from_uid, uidnext)
self.uid_reindexed = True
elif uid_validity == current_uid_validity:
return
def parse_imap_responce(self, cmd, responce):
pattern = r"(?<={cmd} )[0-9]*".format(cmd=cmd)
<CHANGES>
match = re.search(pattern, responce, re.U | re.I)
<CHANGEE>
if match:
return match.group(0)
else:
return None
def retrieve_message(self, message_meta, msg_num=None):
incoming_mail = None
try:
self.validate_message_limits(message_meta)
if cint(self.settings.use_imap):
status, message = self.imap.uid('fetch', message_meta, '(BODY.PEEK[] BODY.PEEK[HEADER] FLAGS)')
raw = message[0]
self.get_email_seen_status(message_meta, raw[0])
<FILEE>
<SCANS>3.x
except ImportError:
from cgi import escape # python 2.x
message = list(part.walk())[1]
headers = []
for key in ('From', 'To', 'Subject', 'Date'):
value = cstr(message.get(key))
if value:
headers.append('{label}: {value}'.format(label=_(key), value=escape(value)))
self.text_content += '\n'.join(headers)
self.html_content += '<hr>' + '\n'.join('<p>{0}</p>'.format(h) for h in headers)
if not message.is_multipart() and message.get_content_type()=='text/plain':
# email.parser didn't parse it!
text_content = self.get_payload(message)
self.text_content += text_content
self.html_content += markdown(text_content)
def get_charset(self, part):
"""Detect chartset."""
charset = part.get_content_charset()
if not charset:
charset = chardet.detect(str(part))['encoding']
return charset
def get_payload(self, part):
charset = self.get_charset(part)
try:
return text_type(part.get_payload(decode=True), str(charset), "ignore")
except LookupError:
return part.get_payload()
def get_attachment(self, part):
#charset = self.get_charset(part)
fcontent = part.get_payload(decode=True)
if fcontent:
content_type = part.get_content_type()
fname = part.get_filename()
if fname:
try:
fname = fname.replace('\n', ' ').replace('\r', '')
fname = cstr(decode_header(fname)[0][0])
except:
fname = get_random_filename(content_type=content_type)
else:
fname = get_random_filename(content_type=content_type)
self.attachments.append({
'content_type': content_type,
'fname': fname,
'fcontent': fcontent,
})
cid = (part.get("Content-Id") or "").strip("><")
if cid:
self.cid_map[fname] = cid
def save_attachments_in_doc(self
<FILEB>
<CHANGES>
if Backend.getIdentifiedDbms() in (DBMS.MYSQL, DBMS.HSQL):
<CHANGEE>
<FILEE>
<FILEB>
"""PostgreSQL input: SELECT usename, passwd FROM pg_shadow"""
"""PostgreSQL output: 'HsYIBS'||COALESCE(CAST(usename AS CHARACTER(10000)), ' ')||'KTBfZp'||COALESCE(CAST(passwd AS CHARACTER(10000)), ' ')||'LkhmuP' FROM pg_shadow"""
"""Oracle input: SELECT COLUMN_NAME, DATA_TYPE FROM SYS.ALL_TAB_COLUMNS WHERE TABLE_NAME='USERS'"""
"""Oracle output: 'GdBRAo'||NVL(CAST(COLUMN_NAME AS VARCHAR(4000)), ' ')||'czEHOf'||NVL(CAST(DATA_TYPE AS VARCHAR(4000)), ' ')||'JVlYgS' FROM SYS.ALL_TAB_COLUMNS WHERE TABLE_NAME='USERS'"""
"""Microsoft SQL Server input: SELECT name, master.dbo.fn_varbintohexstr(password) FROM master..sysxlogins"""
"""Microsoft SQL Server output: 'QQMQJO'+ISNULL(CAST(name AS VARCHAR(8000)), ' ')+'kAtlqH'+ISNULL(CAST(master.dbo.fn_varbintohexstr(password) AS VARCHAR(8000)), ' ')+'lpEqoi' FROM master..sysxlogins"""
"""@param query: query string to be processed"""
"""@type query: C{str}"""
"""@return: query string nulled, casted and concatenated"""
"""@rtype: C{str}"""
if unpack:
concatenatedQuery = ""
query = query.replace(", ", ',')
fieldsSelectFrom, fieldsSelect, fieldsNoSelect, fieldsSelectTop, fieldsSelectCase, _, fieldsToCastStr, fieldsExists = self.getFields(query)
castedFields = self.nullCastConcatFields(fieldsToCastStr)
concatenatedQuery = query.replace(fieldsToCastStr, castedFields, 1)
else:
return query
<CHANGES>
if Backend.isDbms(DBMS.MYSQL):
<CHANGEE>
if fieldsExists:
concatenatedQuery = concatenatedQuery.replace("SELECT ", "CONCAT('%s'," % kb.chars.start, 1)
concatenatedQuery += ",'%s')" % kb<SCANS>, lengthQuery % fieldsStr, 1)
else:
lengthExpr = lengthQuery % expression
return unescaper.escape(lengthExpr)
def forgeCaseStatement(self, expression):
"""Take in input a query string and return its CASE statement query"""
"""string."""
"""Example:"""
"""Input: (SELECT super_priv FROM mysql.user WHERE user=(SUBSTRING_INDEX(CURRENT_USER(), '@', 1)) LIMIT 0, 1)='Y'"""
"""Output: SELECT (CASE WHEN ((SELECT super_priv FROM mysql.user WHERE user=(SUBSTRING_INDEX(CURRENT_USER(), '@', 1)) LIMIT 0, 1)='Y') THEN 1 ELSE 0 END)"""
"""@param expression: expression to be processed"""
"""@type num: C{str}"""
"""@return: processed expression"""
"""@rtype: C{str}"""
caseExpression = expression
if Backend.getIdentifiedDbms() is not None:
caseExpression = queries[Backend.getIdentifiedDbms()].case.query % expression
if "(IIF" not in caseExpression and Backend.getIdentifiedDbms() in FROM_DUMMY_TABLE and not caseExpression.upper().endswith(FROM_DUMMY_TABLE[Backend.getIdentifiedDbms()]):
caseExpression += FROM_DUMMY_TABLE[Backend.getIdentifiedDbms()]
return caseExpression
def addPayloadDelimiters(self, inpStr):
"""Adds payload delimiters around the input string"""
return "%s%s%s" % (PAYLOAD_DELIMITER, inpStr, PAYLOAD_DELIMITER) if inpStr else inpStr
def removePayloadDelimiters(self, inpStr):
"""Removes payload delimiters from inside the input string"""
return inpStr.replace(PAYLOAD_DELIMITER, '') if inpStr else inpStr
def extractPayload(self, inpStr):
"""Extracts payload from inside of the input string"""
return extractRegexResult("(?s)%s(?P<result>.*?)%s" % (PAYLOAD_DELIMITER, PAYLOAD_DELIMITER), inpStr)
def replacePayload(self, inpStr, payload):
"""Replaces payload inside the input string with a given payload"""
return re.sub("(%s.*?%
<FILEB>
<CHANGES>
self.assertContains(response, b"Currently")
<CHANGEE>
<FILEE>
<FILEB>
self.client.logout()
def test_inline_file_upload_edit_validation_error_post(self):
"""Test that inline file uploads correctly display prior data (#10002)."""
post_data = {
"name": "Test Gallery",
"pictures-TOTAL_FORMS": "2",
"pictures-INITIAL_FORMS": "1",
"pictures-MAX_NUM_FORMS": "0",
"pictures-0-id": six.text_type(self.picture.id),
"pictures-0-gallery": six.text_type(self.gallery.id),
"pictures-0-name": "Test Picture",
"pictures-0-image": "",
"pictures-1-id": "",
"pictures-1-gallery": str(self.gallery.id),
"pictures-1-name": "Test Picture 2",
"pictures-1-image": "",
}
response = self.client.post('/test_admin/%s/admin_views/gallery/%d/' % (self.urlbit, self.gallery.id), post_data)
<CHANGES>
self.assertTrue(response._container[0].find("Currently:") > -1)
<CHANGEE>
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class AdminInlineTests(TestCase):
urls = "admin_views.urls"
fixtures = ['admin-views-users.xml']
def setUp(self):
self.post_data = {
"name": "Test Name",
"widget_set-TOTAL_FORMS": "3",
"widget_set-INITIAL_FORMS": "0",
"widget_set-MAX_NUM_FORMS": "0",
"widget_set-0-id": "",
"widget_set-0-owner": "1",
<FILEE>
<SCANS> 1)
def test_generic_relations(self):
"""If a deleted object has GenericForeignKeys pointing to it,"""
"""those objects should be listed for deletion."""
plot = Plot.objects.get(pk=3)
FunkyTag.objects.create(content_object=plot, name='hott')
should_contain = """<li>Funky tag: hott"""
response = self.client.get('/test_admin/admin/admin_views/plot/%s/delete/' % quote(3))
self.assertContains(response, should_contain)
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class AdminViewStringPrimaryKeyTest(TestCase):
urls = "admin_views.urls"
fixtures = ['admin-views-users.xml', 'string-primary-key.xml']
def __init__(self, *args):
super(AdminViewStringPrimaryKeyTest, self).__init__(*args)
self.pk = """abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ 1234567890 -_.!~*'() ;/?:@&=+$, <>#%" {}|\^[]`"""
def setUp(self):
self.client.login(username='super', password='secret')
content_type_pk = ContentType.objects.get_for_model(ModelWithStringPrimaryKey).pk
LogEntry.objects.log_action(100, content_type_pk, self.pk, self.pk, 2, change_message='Changed something')
def tearDown(self):
self.client.logout()
def test_get_history_view(self):
"""Retrieving the history for an object using urlencoded form of primary"""
"""key should work."""
"""Refs #12349, #18550."""
response = self.client.get('/test_admin/admin/admin_views/modelwithstringprimarykey/%s/history/' % quote(self.pk))
self.assertContains(response, escape(self.pk))
self.assertContains(response, 'Changed something')
self.assertEqual(response.status_code, 200)
def test_get_change_view(self):
"Retrieving the object using urlencoded form of primary key should work"
response = self.client.get('/test_admin/admin/admin_views/modelwithstringprimarykey/%s/' % quote(self.pk))
self.assertContains(response, escape(self.pk))
self.assertEqual(response.status_code, 200)
def test_
<FILEB>
<CHANGES>
parameters['additional_owners'] = ','.join(map(str,additional_owners))
<CHANGEE>
<FILEE>
<FILEB>
"""File-like object to upload."""
"""additional_owners: additional Twitter users that are allowed to use"""
"""The uploaded media. Should be a list of integers. Maximum"""
"""number of additional owners is capped at 100 by Twitter."""
"""media_category:"""
"""Category with which to identify media upload. Only use with Ads"""
"""API & video files."""
"""Returns:"""
"""tuple: media_id (returned from Twitter), file-handler object (i.e., has .read()"""
"""method), filename media file."""
url = '%s/media/upload.json' % self.upload_url
media_fp, filename, file_size, media_type = parse_media_file(media, async_upload=True)
if not all([media_fp, filename, file_size, media_type]):
raise TwitterError({'message': 'Could not process media file'})
parameters = {}
if additional_owners and len(additional_owners) > 100:
raise TwitterError({'message': 'Maximum of 100 additional owners may be specified for a Media object'})
if additional_owners:
<CHANGES>
parameters['additional_owners'] = additional_owners
<CHANGEE>
if media_category:
parameters['media_category'] = media_category
# INIT doesn't read in any data. Its purpose is to prepare Twitter to
# receive the content in APPEND requests.
parameters['command'] = 'INIT'
parameters['media_type'] = media_type
parameters['total_bytes'] = file_size
resp = self._RequestUrl(url, 'POST', data=parameters)
data = self._ParseAndCheckTwitter(resp.content.decode('utf-8'))
try:
media_id = data['media_id']
except KeyError:
<FILEE>
<SCANS>display_coordinates (bool, optional):"""
"""Whether or not to put a pin on the exact coordinates a tweet"""
"""has been sent from."""
"""trim_user (bool, optional):"""
"""If True the returned payload will only contain the user IDs,"""
"""otherwise the payload will contain the full user data item."""
"""verify_status_length (bool, optional):"""
"""If True, api throws a hard error that the status is over"""
"""CHARACTER_LIMIT characters. If False, Api will attempt to post"""
"""the status."""
"""Returns:"""
"""(twitter.Status) A twitter.Status instance representing the"""
"""message posted."""
url = '%s/statuses/update.json' % self.base_url
if isinstance(status, str) or self._input_encoding is None:
u_status = status
else:
u_status = str(status, self._input_encoding)
if verify_status_length and calc_expected_status_length(u_status) > CHARACTER_LIMIT:
raise TwitterError("Text must be less than or equal to CHARACTER_LIMIT characters.")
if auto_populate_reply_metadata and not in_reply_to_status_id:
raise TwitterError("If auto_populate_reply_metadata is True, you must set in_reply_to_status_id")
parameters = {
'status': u_status,
'in_reply_to_status_id': in_reply_to_status_id,
'auto_populate_reply_metadata': auto_populate_reply_metadata,
'place_id': place_id,
'display_coordinates': display_coordinates,
'trim_user': trim_user,
'exclude_reply_user_ids': ','.join([str(u) for u in exclude_reply_user_ids or []]),
}
if attachment_url:
parameters['attachment_url'] = attachment_url
if media:
chunked_types = ['video/mp4', 'video/quicktime', 'image/gif']
media_ids = []
if isinstance(media, (int, long)):
media_ids.append(media)
elif isinstance(media, list):
for media_file in media:
# If you want to pass just a media ID, it should be an int
if isinstance(media_file, (int, long)):
media_ids.append(media_file)
continue
_, _, file_size, media_type = parse_media_file(media_file)
if (media_type == 'image/gif' or media_type == 'video/mp4') and len(media) > 1:
raise TwitterError(
'You cannot post more than 1 GIF or 1 video
<FILEB>
<CHANGES>
if (isinstance(v.dtype, type) and issubclass(v.dtype, basestring)) or v.dtype.char == 'S':
<CHANGEE>
<FILEE>
<FILEB>
"""(except for boundary variables defined in Section 7.1, "Cell Boundaries" and climatology variables"""
"""defined in Section 7.4, "Climatological Statistics")."""
"""Units are not required for dimensionless quantities. A variable with no units attribute is assumed"""
"""to be dimensionless. However, a units attribute specifying a dimensionless unit may optionally be"""
"""included."""
"""- units required"""
"""- type must be recognized by udunits"""
"""- if std name specified, must be consistent with standard name table, must also be consistent with a"""
"""specified cell_methods attribute if present"""
ret_val = []
deprecated = ['level', 'layer', 'sigma_level']
for k, v in ds.dataset.variables.iteritems():
# skip climatological vars, boundary vars
if v in self._find_clim_vars(ds) or \
v in self._find_boundary_vars(ds).itervalues() or \
v.shape == ():
continue
# skip string type vars
<CHANGES>
if v.dtype.char == 'S':
<CHANGEE>
continue
# skip quality control vars
if hasattr(v, 'flag_meanings'):
continue
if hasattr(v, 'standard_name') and 'status_flag' in v.standard_name:
continue
# skip DSG cf_role
if hasattr(v, "cf_role"):
continue
units = getattr(v, 'units', None)
# 1) "units" attribute must be present
presence = Result(BaseCheck.HIGH, units is not None, ('units', k, 'present'))
<FILEE>
<SCANS> """variable."""
#TODO: We need to identify a non-compliant example of this that can be verified, but I believe
# that if the file is netCDF then this requirement may be met. When we do we can reinsert this check
#pass
def check_fill_value_outside_valid_range(self, ds):
"""2.5.1 The _FillValue should be outside the range specified by valid_range (if used) for a variable."""
fails = []
checked = 0
for k, v in ds.dataset.variables.iteritems():
if hasattr(v, '_FillValue'):
attrs = v.ncattrs()
if 'valid_range' in attrs:
rmin, rmax = v.valid_range
elif 'valid_min' in attrs and 'valid_max' in attrs:
rmin = v.valid_min
rmax = v.valid_max
else:
continue
checked += 1
if v._FillValue >= rmin and v._FillValue <= rmax:
fails.append((k, "%s is between %s and %s" % (v._FillValue, rmin, rmax)))
if checked >= 1:
return Result(BaseCheck.HIGH, (checked - len(fails), checked), msgs=fails)
else:
return []
def check_conventions_are_cf_16(self, ds):
"""2.6.1 the NUG defined global attribute Conventions to the string value "CF-1.6""""
valid_conventions = ['CF-1.0', 'CF-1.1', 'CF-1.2', 'CF-1.3',
'CF-1.4', 'CF-1.5', 'CF-1.6']
if hasattr(ds.dataset, 'Conventions'):
conventions = re.split(',|\s+', getattr(ds.dataset, 'Conventions', ''))
if any((c.strip() in valid_conventions for c in conventions)):
valid = True
reasoning = ['Conventions field is "CF-1.x (x in 0-6)"']
else:
valid = False
reasoning = ['Conventions field is not "CF-1.x (x in 0-6)"']
else:
valid = False
reasoning = ['Conventions field is not present']
return Result(BaseCheck.HIGH, valid, 'conventions', msgs=reasoning)
@score_group('convention_attrs')
def check_convention_globals(self, ds):
"""2.6.2 title/history global attributes, must be strings. Do not need to exist."""
attrs = ['title', 'history']
ret = []
for a in attrs:
if hasattr(ds.dataset, a):
ret.append(Result(BaseCheck.HIGH, isinstance(getattr(ds.dataset, a), basestring), ('global', a)))
return ret
<FILEB>
<CHANGES>
stdin=PIPE, stdout=PIPE, stderr=open('/dev/null','w'))
<CHANGEE>
<FILEE>
<FILEB>
c = out.read(1)
if not c: return # Volume report was interrupted, ignore it
buf += c
if c == '%':
playlist.update(filename, volume = float(volbuf) * VOL_MAX / 100.)
break
else:
volbuf += c
stdout.write(buf)
buf = ''
for filename, volume in playlist:
player_in = None
player_out = None
g_out_reader = None
try:
stdout.write(CLEAR + '\n' * 2)
proc = Popen(['mplayer', '-vo', 'none', '-softvol', '-softvol-max', str(VOL_MAX * 100.),
'-volume', str(volume * 100. / VOL_MAX), filename],
<CHANGES>
stdin=PIPE, stdout=PIPE)
<CHANGEE>
player_in = convert_fobj(proc.stdin)
player_out = convert_fobj(proc.stdout)
g_out_reader = gevent.spawn(out_reader, player_out, stdout, filename)
with RaiseOnExit(proc), \
TermAttrs.modify(exclude=(0,0,0,ECHO|ECHONL|ICANON)):
while True:
c = stdin.read(1)
if c == 'q':
playlist.update(filename, weight=lambda x: x/2.)
player_in.write(" \n")
elif c == 'f':
playlist.update(filename, weight=lambda x: x*2.)
<FILEE>
<SCANS>):
make_nonblocking(fobj.fileno())
return FileObject(fobj, bufsize=0, close=False)
if not stdin: stdin = convert_fobj(sys.stdin)
if not stdout: stdout = convert_fobj(sys.stdout)
if isinstance(playlist, str): playlist = Playlist(playlist)
VOL_MAX = 4 # Sets what interface reports as "100%"
def out_reader(out, stdout, filename):
# This is a turd, please ignore it (it sniffs the output stream for "Volume: X %")
buf = ''
while 1:
c = out.read(1)
if not c: return
buf += c
if not 'Volume:'.startswith(buf):
stdout.write(buf)
buf = ''
elif buf == 'Volume:':
volbuf = ''
while 1:
elif c == 'd':
playlist.update(filename, weight=lambda x: x/2.)
elif c == 'Q':
player_in.write("q")
return
else:
player_in.write(c)
except OSError, e:
# There's a race that can occur here, causing a broken pipe error
if e.errno != errno.EPIPE: raise
except RaiseOnExit.ChildExited:
# This is the expected path out of the input loop
pass
finally:
try:
proc.terminate()
except OSError, e:
if e.errno != errno.ESRCH: raise
proc.wait()
if g_out_reader:
g_out_reader.join()
if playlist.dirty: playlist.writefile()
if __name__ == '__main__':
import debug
gevent.spawn(debug.starve_test, open('/tmp/log', 'w'))
gevent.sleep(0.2)
play(*sys.argv[1:])
<FILEB>
<CHANGES>
formatted_time = str(datetime.timedelta(seconds=int(estimated_time_remaining)))
<CHANGEE>
<FILEE>
<FILEB>
# No validation set, so just assume it's the best so far.
is_best_so_far = True
val_metrics = {}
best_epoch_val_metrics = {}
this_epoch_val_metric = None
self._save_checkpoint(epoch, validation_metric_per_epoch, is_best=is_best_so_far)
self._metrics_to_tensorboard(epoch, train_metrics, val_metrics=val_metrics)
self._metrics_to_console(train_metrics, val_metrics)
if self._learning_rate_scheduler:
# The LRScheduler API is agnostic to whether your schedule requires a validation metric -
# if it doesn't, the validation metric passed here is ignored.
self._learning_rate_scheduler.step(this_epoch_val_metric, epoch)
epoch_elapsed_time = time.time() - epoch_start_time
logger.info("Epoch duration: %s", time.strftime("%H:%M:%S", time.gmtime(epoch_elapsed_time)))
if epoch < self._num_epochs - 1:
training_elapsed_time = time.time() - training_start_time
estimated_time_remaining = training_elapsed_time * \
((self._num_epochs - epoch_counter) / float(epoch - epoch_counter + 1) - 1)
<CHANGES>
formatted_time = time.strftime("%H:%M:%S", time.gmtime(estimated_time_remaining))
<CHANGEE>
logger.info("Estimated training time remaining: %s", formatted_time)
epochs_trained += 1
training_elapsed_time = time.time() - training_start_time
metrics = {
"training_duration": time.strftime("%H:%M:%S", time.gmtime(training_elapsed_time)),
"training_start_epoch": epoch_counter,
"training_epochs": epochs_trained
}
for key, value in train_metrics.items():
metrics["training_" + key] = value
for key, value in val_metrics.items():
metrics["validation_" + key] = value
<FILEE>
<SCANS>
else:
val = value
return val
def add_train_scalar(self, name: str, value: float, global_step: int) -> None:
# get the scalar
if self._train_log is not None:
self._train_log.add_scalar(name, self._item(value), global_step)
def add_train_histogram(self, name: str, values: torch.Tensor, global_step: int) -> None:
if self._train_log is not None:
if isinstance(values, torch.Tensor):
values_to_write = values.cpu().data.numpy().flatten()
self._train_log.add_histogram(name, values_to_write, global_step)
def add_validation_scalar(self, name: str, value: float, global_step: int) -> None:
if self._validation_log is not None:
self._validation_log.add_scalar(name, self._item(value), global_step)
def time_to_str(timestamp: int) -> str:
"""Convert seconds past Epoch to human readable string."""
datetimestamp = datetime.datetime.fromtimestamp(timestamp)
return '{:04d}-{:02d}-{:02d}-{:02d}-{:02d}-{:02d}'.format(
datetimestamp.year, datetimestamp.month, datetimestamp.day,
datetimestamp.hour, datetimestamp.minute, datetimestamp.second
)
def str_to_time(time_str: str) -> datetime.datetime:
"""Convert human readable string to datetime.datetime."""
pieces: Any = [int(piece) for piece in time_str.split('-')]
return datetime.datetime(*pieces)
class Trainer:
def __init__(self,
model: Model,
optimizer: torch.optim.Optimizer,
iterator: DataIterator,
train_dataset: Iterable[Instance],
validation_dataset: Optional[Iterable[Instance]] = None,
patience: Optional[int] = None,
validation_metric: str = "-loss",
validation_iterator: DataIterator = None,
shuffle: bool = True,
num_epochs: int = 20,
serialization_dir: Optional[str] = None,
num_serialized_models_to_keep: int = 20,
keep_serialized_model_every_num_seconds:
<FILEB>
<CHANGES>
clone._select = [SQL('1')]
<CHANGEE>
<FILEE>
<FILEB>
return query
def aggregate(self, aggregation=None, convert=True):
return self._aggregate(aggregation).scalar(convert=convert)
def count(self):
if self._distinct or self._group_by:
return self.wrapped_count()
# defaults to a count() of the primary key
return self.aggregate(convert=False) or 0
def wrapped_count(self, clear_limit=True):
clone = self.order_by()
if clear_limit:
clone._limit = clone._offset = None
sql, params = clone.sql()
wrapped = 'SELECT COUNT(1) FROM (%s) AS wrapped_select' % sql
rq = self.model_class.raw(wrapped, *params)
return rq.scalar() or 0
def exists(self):
clone = self.paginate(1, 1)
<CHANGES>
clone._select = [self.model_class._meta.primary_key]
<CHANGEE>
return bool(clone.scalar())
def get(self):
clone = self.paginate(1, 1)
try:
return clone.execute().next()
except StopIteration:
raise self.model_class.DoesNotExist(
'Instance matching query does not exist:\nSQL: %s\nPARAMS: %s'
% self.sql())
def first(self):
res = self.execute()
res.fill_cache(1)
<FILEE>
<SCANS>
*fk_clause.nodes)
create_foreign_key = return_parsed_node('_create_foreign_key')
def _create_table(self, model_class, safe=False):
statement = 'CREATE TABLE IF NOT EXISTS' if safe else 'CREATE TABLE'
meta = model_class._meta
columns, constraints = [], []
if isinstance(meta.primary_key, CompositeKey):
pk_cols = [meta.fields[f]._as_entity()
for f in meta.primary_key.field_names]
constraints.append(Clause(
SQL('PRIMARY KEY'), EnclosedClause(*pk_cols)))
for field in meta.get_fields():
columns.append(self.field_definition(field))
if isinstance(field, ForeignKeyField) and not field.deferred:
constraints.append(self.foreign_key_constraint(field))
return Clause(
SQL(statement),
model_class._as_entity(),
EnclosedClause(*(columns + constraints)))
create_table = return_parsed_node('_create_table')
def _drop_table(self, model_class, fail_silently=False, cascade=False):
statement = 'DROP TABLE IF EXISTS' if fail_silently else 'DROP TABLE'
ddl = [SQL(statement), model_class._as_entity()]
if cascade:
ddl.append(SQL('CASCADE'))
return Clause(*ddl)
drop_table = return_parsed_node('_drop_table')
def index_name(self, table, columns):
index = '%s_%s' % (table, '_'.join(columns))
if len(index) > 64:
index_hash = hashlib.md5(index.encode('utf-8')).hexdigest()
index = '%s_%s' % (table, index_hash)
return index
def _create_index(self, model_class, fields, unique, *extra):
tbl_name = model_class._meta.db_table
statement = 'CREATE UNIQUE INDEX' if unique else 'CREATE INDEX'
index_name = self.index_name(tbl_name, [f.db_column for f in fields])
return Clause(
SQL(statement),
Entity(index_name),
SQL('ON'),
model_class._as_entity(),
EnclosedClause(*[field._as_entity() for field in fields]),
*extra)
create_index = return_parsed_node('_create_index')
def _create_sequence(self, sequence_name):
return Clause(SQL('CREATE SEQUENCE'), Entity(sequence_name))
create_sequence = return_parsed_node('_create_sequence')
def _drop_sequence(self, sequence_name):
return Clause(SQL('DROP SEQUENCE'), Entity(sequence_name))
drop_sequence = return_parsed_node('_drop_sequence')
class QueryResultWrapper(object):
"""Provides an iterator over the results of a raw Query, additionally doing"""
"""two things:"""
"""- converts rows from the database into python representations"""
"""- ensures that multiple iterations do not result in multiple queries"""
def __init
<FILEB>
<CHANGES>
raise exception.Conflict(type=type, details=e.message)
<CHANGEE>
<FILEE>
<FILEB>
from keystone import identity
def _filter_user(user_ref):
if user_ref:
user_ref.pop('password', None)
return user_ref
def _ensure_hashed_password(user_ref):
pw = user_ref.get('password', None)
if pw is not None:
user_ref['password'] = utils.hash_password(pw)
return user_ref
def handle_conflicts(type='object'):
"""Converts IntegrityError into HTTP 409 Conflict."""
def decorator(method):
@functools.wraps(method)
def wrapper(*args, **kwargs):
try:
return method(*args, **kwargs)
except sql.IntegrityError as e:
<CHANGES>
raise exception.Conflict(type=type, details=str(e))
<CHANGEE>
return wrapper
return decorator
class User(sql.ModelBase, sql.DictBase):
__tablename__ = 'user'
id = sql.Column(sql.String(64), primary_key=True)
name = sql.Column(sql.String(64), unique=True, nullable=False)
#password = sql.Column(sql.String(64))
extra = sql.Column(sql.JsonBlob())
@classmethod
def from_dict(cls, user_dict):
# shove any non-indexed properties into extra
extra = {}
<FILEE>
<SCANS> metadata_ref = self.get_metadata(user_id, tenant_id)
is_new = False
except exception.MetadataNotFound:
metadata_ref = {}
is_new = True
roles = set(metadata_ref.get('roles', []))
if role_id not in roles:
msg = 'Cannot remove role that has not been granted, %s' % role_id
raise exception.RoleNotFound(message=msg)
roles.remove(role_id)
metadata_ref['roles'] = list(roles)
if is_new:
self.create_metadata(user_id, tenant_id, metadata_ref)
else:
self.update_metadata(user_id, tenant_id, metadata_ref)
# CRUD
@handle_conflicts(type='user')
def create_user(self, user_id, user):
user['name'] = clean.user_name(user['name'])
user = _ensure_hashed_password(user)
session = self.get_session()
with session.begin():
user_ref = User.from_dict(user)
session.add(user_ref)
session.flush()
return user_ref.to_dict()
@handle_conflicts(type='user')
def update_user(self, user_id, user):
if 'name' in user:
user['name'] = clean.user_name(user['name'])
session = self.get_session()
if 'id' in user and user_id != user['id']:
raise exception.ValidationError('Cannot change user ID')
with session.begin():
user_ref = session.query(User).filter_by(id=user_id).first()
if user_ref is None:
raise exception.UserNotFound(user_id=user_id)
old_user_dict = user_ref.to_dict()
user = _ensure_hashed_password(user)
for k in user:
old_user_dict[k] = user[k]
new_user = User.from_dict(old_user_dict)
user_ref.name = new_user.name
user_ref.extra = new_user.extra
session.flush()
return user_ref
def delete_user(self, user_id):
session = self.get_session()
with session.begin():
session.query(UserTenantMembership)\
.filter_by(user_id=user_id).delete(False)
session.query(Metadata)\
.filter_by(user_id=user_id).delete(False)
if not session.query(User).filter_by(id=user_id).delete(False):
raise exception.UserNotFound(user_id=user_id)
@handle_conflicts(type='tenant')
def create_tenant(self, tenant_id, tenant):
tenant['name'] = clean.tenant_name(tenant['name'])
session = self.get_session()
with session.begin():
tenant_ref = Tenant.from_dict(tenant)
session.add(tenant_ref
<FILEB>
<CHANGES>
@deprecated('Property.type', since='20140607', future_warning=True)
<CHANGEE>
<FILEE>
<FILEB>
"""@param site: data repository"""
"""@type site: pywikibot.site.DataSite"""
"""@param id: id of the property"""
"""@type id: basestring"""
"""@param datatype: datatype of the property;"""
"""if not given, it will be queried via the API"""
"""@type datatype: basestring"""
self.repo = site
self.id = id.upper()
if datatype:
self._type = datatype
@property
def type(self):
"""Return the type of this property."""
"""@rtype: str"""
if not hasattr(self, '_type'):
self._type = self.repo.getPropertyType(self)
return self._type
<CHANGES>
@deprecated('Property.type', since='20140607')
<CHANGEE>
def getType(self):
"""Return the type of this property."""
"""It returns 'globecoordinate' for type 'globe-coordinate'"""
"""in order to be backwards compatible. See"""
"""https://gerrit.wikimedia.org/r/#/c/135405/ for background."""
if self.type == 'globe-coordinate':
return 'globecoordinate'
else:
return self._type
def getID(self, numeric=False):
"""Get the identifier of this property."""
"""@param numeric: Strip the first letter and return an int"""
<FILEE>
<SCANS>_warning=True)
@deprecated_args(step=True)
def contributingUsers(self, total=None):
"""Return a set of usernames (or IPs) of users who edited this page."""
"""@param total: iterate no more than this number of revisions in total"""
"""@rtype: dict_keys"""
return self.contributors(total=total).keys()
def revision_count(self, contributors=None) -> int:
"""Determine number of edits from contributors."""
"""@param contributors: contributor usernames"""
"""@type contributors: iterable of str or pywikibot.User,"""
"""a single pywikibot.User, a str or None"""
"""@return: number of edits for all provided usernames"""
cnt = self.contributors()
if not contributors:
return sum(cnt.values())
if isinstance(contributors, User):
contributors = contributors.username
if isinstance(contributors, str):
return cnt[contributors]
return sum(cnt[user.username] if isinstance(user, User) else cnt[user]
for user in contributors)
@deprecated('contributors() or revisions()', since='20150206')
@deprecated_args(limit='total')
def getLatestEditors(self, total=1) -> list:
"""Get a list of revision information of the last total edits."""
"""DEPRECATED: Use Page.revisions."""
"""@param total: iterate no more than this number of revisions in total"""
"""@return: list of dict, each dict containing the username and Timestamp"""
return [
{'user': rev.user,
'timestamp': rev.timestamp.isoformat()}
for rev in self.revisions(total=total)]
def merge_history(self, dest, timestamp=None, reason=None):
"""Merge revisions from this page into another page."""
"""See L{APISite.merge_history} for details."""
"""@param dest: Destination page to which revisions will be merged"""
"""@type dest: pywikibot.Page"""
"""@param timestamp: Revisions from this page dating up to this timestamp"""
"""will be merged into the destination page (if not given or False,"""
"""all revisions will be merged)"""
"""@type timestamp: pywikibot.Timestamp"""
"""@param reason: Optional reason for the history merge"""
"""@type reason: str"""
self.site.merge_history(self, dest, timestamp, reason)
@deprecated_args(
throttle=True, deleteAndMove='noredirect', movetalkpage='movetalk')
@remove_last_args(['safe'])
def move(self, newtitle, reason=None, movetalk=True, noredirect=False):
"""Move this page to a new title."""
"""@param newtitle: The new page title."""
"""@param reason: The edit summary for the move."""
"""@param movetalk: If true, move this page's talk page (if it exists)"""
"""@param noredirect: if move succeeds, delete the old page"""
"""(usually requires sysop privileges, depending on wiki settings)"""
if reason is None:
<FILEB>
<CHANGES>
invalids.update(c for c in chlist if isinstance(c, discord.TextChannel))
<CHANGEE>
<FILEE>
<FILEB>
chlist.difference_update(invalids)
self.config.bound_channels.difference_update(invalids)
if chlist:
log.info("Bound to text channels:")
[log.info(' - {}/{}'.format(ch.guild.name.strip(), ch.name.strip())) for ch in chlist if ch]
else:
print("Not bound to any text channels")
if invalids and self.config.debug_mode:
print(flush=True)
log.info("Not binding to voice channels:")
[log.info(' - {}/{}'.format(ch.guild.name.strip(), ch.name.strip())) for ch in invalids if ch]
print(flush=True)
else:
log.info("Not bound to any text channels")
if self.config.autojoin_channels:
chlist = set(self.get_channel(i) for i in self.config.autojoin_channels if i)
chlist.discard(None)
invalids = set()
<CHANGES>
invalids.update(c for c in chlist if c.type == discord.ChannelType.text)
<CHANGEE>
chlist.difference_update(invalids)
self.config.autojoin_channels.difference_update(invalids)
if chlist:
log.info("Autojoining voice chanels:")
[log.info(' - {}/{}'.format(ch.guild.name.strip(), ch.name.strip())) for ch in chlist if ch]
else:
log.info("Not autojoining any voice channels")
if invalids and self.config.debug_mode:
print(flush=True)
log.info("Cannot autojoin text channels:")
[log.info(' - {}/{}'.format(ch.guild.name.strip(), ch.name.strip())) for ch in invalids if ch]
self.autojoin_channels = chlist
<FILEE>
<SCANS> '{0} users have been removed from the blacklist').format(old_len - len(self.blacklist)),
reply=True, delete_after=10
)
async def cmd_id(self, author, user_mentions):
"""Usage:"""
"""{command_prefix}id [@user]"""
"""Tells the user their id or the id of another user."""
if not user_mentions:
return Response(self.str.get('cmd-id-self', 'Your ID is `{0}`').format(author.id), reply=True, delete_after=35)
else:
usr = user_mentions[0]
return Response(self.str.get('cmd-id-other', '**{0}**s ID is `{1}`').format(usr.name, usr.id), reply=True, delete_after=35)
async def cmd_save(self, player, url=None):
"""Usage:"""
"""{command_prefix}save [url]"""
"""Saves the specified song or current song if not specified to the autoplaylist."""
if url or (player.current_entry and not isinstance(player.current_entry, StreamPlaylistEntry)):
if not url:
url = player.current_entry.url
if url not in self.autoplaylist:
self.autoplaylist.append(url)
write_file(self.config.auto_playlist_file, self.autoplaylist)
log.debug("Appended {} to autoplaylist".format(url))
return Response(self.str.get('cmd-save-success', 'Added <{0}> to the autoplaylist.').format(url))
else:
raise exceptions.CommandError(self.str.get('cmd-save-exists', 'This song is already in the autoplaylist.'))
else:
raise exceptions.CommandError(self.str.get('cmd-save-invalid', 'There is no valid song playing.'))
@owner_only
async def cmd_joinserver(self, message, server_link=None):
"""Usage:"""
"""{command_prefix}joinserver invite_link"""
"""Asks the bot to join a server. Note: Bot accounts cannot use invite links."""
url = await self.generate_invite_link()
return Response(
self.str.get('cmd-joinserver-response', "Click here to add me to a server: \n{}").format(url),
reply=True, delete_after=30
)
async def cmd_karaoke(self, player, channel, author):
"""Usage:"""
"""{command_prefix}karaoke"""
<FILEB>
<CHANGES>
raise Exception( "Failed to get paramemeters for dataset id %d " % data.id )
<CHANGEE>
<FILEE>
<FILEB>
raise Exception("Failed to get job information for dataset hid %d" % data.hid)
# Get the tool object
tool_id = job.tool_id
try:
# Load the tool
toolbox = self.get_toolbox()
tool = toolbox.tools_by_id.get( tool_id, None )
except:
#this is expected, so not an exception
error( "This dataset was created by an obsolete tool (%s). Can't re-run." % tool_id )
# Can't rerun upload, external data sources, et cetera. Workflow
# compatible will proxy this for now
if not tool.is_workflow_compatible:
error( "The '%s' tool does not currently support rerunning." % tool.name )
# Get the job's parameters
try:
params_objects = job.get_param_values( trans.app )
except:
<CHANGES>
raise Exception( "Failed to get paramemeters for dataset id %d " % hid )
<CHANGEE>
# Unpack unvalidated values to strings, they'll be validated when the
# form is submitted (this happens when re-running a job that was
# initially run by a workflow)
validated_params = {}
for name, value in params_objects.items():
if isinstance( value, UnvalidatedValue ):
validated_params [ str(name) ] = str(value)
else:
validated_params [ str(name) ] = value
params_objects = validated_params
# Create a fake tool_state for the tool, with the parameters values
state = tool.new_state( trans )
<FILEE>
<SCANS> url_paste:
line = line.rstrip( '\r\n' ).strip()
if not line:
continue
elif line.lower().startswith( 'http://' ) or line.lower().startswith( 'ftp://' ):
url = True
datasets.append( create_dataset( line ) )
else:
if url:
continue # non-url when we've already processed some urls
else:
# pasted data
datasets.append( create_dataset( 'Pasted Entry' ) )
break
return [ d.id for d in datasets ]
@web.expose
def upload_async_message( self, trans, **kwd ):
# might be more appropriate in a different controller
msg = """<p>Your upload has been queued. History entries that are still uploading will be blue, and turn green upon completion.</p>"""
"""<p><b>Please do not use your browser\'s "stop" or "reload" buttons until the upload is complete, or it may be interrupted.</b></p>"""
"""<p>You may safely continue to use Galaxy while the upload is in progress. Using "stop" and "reload" on pages other than Galaxy is also safe.</p>"""
return trans.show_message( msg, refresh_frames='history' )
<FILEB>
<CHANGES>
logging.debug(attributes['bill_id'])
<CHANGEE>
<FILEE>
<FILEB>
elif bill_number[0] == '2':
bill_prefix = 'SB'
elif bill_number[0] == '3':
bill_prefix = 'HCR'
elif bill_number[0] == '4':
bill_prefix = 'SCR'
elif bill_number[0] == '5':
bill_prefix = 'HR'
elif bill_number[0] == '6':
bill_prefix = 'SR'
elif bill_number[0] == '7':
bill_prefix = 'HMR'
elif bill_number[0] == '8':
bill_prefix = 'SMR'
attributes['bill_id'] = bill_prefix + ' ' + bill_number
# Skip duplicates (bill is listed once for each version)
if attributes['bill_id'] in indexed_bills.keys():
continue
<CHANGES>
print attributes['bill_id']
<CHANGEE>
# Parse details page
attributes.update(
self.scrape_bill_details(assembly_url, bill_number))
# Create bill
bill = Bill(**attributes)
# Parse actions
actions = self.scrape_bill_actions(assembly_url, bill_number, year)
for action in actions:
bill.add_action(**action)
# Parse versions
versions = self.scrape_bill_versions(assembly_url, bill_number)
for version in versions:
<FILEE>
<SCANS>^[0-9]{4}$', bill_number):
raise ScrapeError('Bill number not in expected format.')
# ND bill prefixes are coded numerically
if bill_number[0] == '1':
bill_prefix = 'HB'
bill.add_version(**version)
# Add bill to dictionary, indexed by its id
indexed_bills[attributes['bill_id']] = bill
# Parse sponsorship data
if int(year) >= 2005:
logging.info('Scraping sponsorship data.')
sponsors = self.scrape_bill_sponsors(assembly_url)
for bill_id, sponsor_list in sponsors.items():
for sponsor in sponsor_list:
# It's possible a bill was misnamed somewhere... but that's
# not a good enough reason to error out
if bill_id in indexed_bills.keys():
indexed_bills[bill_id].add_sponsor(**sponsor)
else:
logging.info('Sponsorship data not available for %s.' % year)
logging.info('Saving scraped bills.')
# Save bill
for bill in indexed_bills.values():
self.add_bill(bill)
def scrape_bill_details(self, assembly_url, bill_number):
"""Scrape details from the history page of a specific ND bill."""
url = \
self.site_root + \
assembly_url + \
('/bill-actions/ba%s.html' % bill_number)
# Parsing
soup = self.parser.parse(self.urlopen(url))
attributes = {}
# Bill title
table = soup.find('table', summary='Measure Number Breakdown')
# There is at least one page that contains no valid data: 2001 / SB2295
if not table:
return { 'title': u''}
text = ''
rows = table.findAll('tr')
# Skip the first two rows relating to who introduced the bill
i = 2
while not rows[i].find('hr'):
text = text + ' ' + rows[i].td.contents[0].strip()
i = i + 1
attributes['title'] = text
return attributes
def scrape_bill_actions(self, assembly_url, bill_number, year):
"""Scrape actions from the history page of a specific ND bill."""
url = \
self.site_root + \
assembly_url + \
('/bill-actions/ba%s.html' % bill_number)
# Parsing
soup = self.parser.parse(self.urlopen(url))
actions = []
table = soup.find('table', summary='Measure Number Breakdown')
# There is at least one page that contains no valid data: 2001 / SB2295
if not table:
return []
headers = table.findAll('th')
# These fields must be stored temporarily as they are not repeated on
# every row
action_date = None
actor = None
for header in headers:
action = {}
# Both the date
<FILEB>
<CHANGES>
if reason!=REASON_ONLYCACHE and isEditable:
<CHANGEE>
<FILEE>
<FILEB>
newPropertyValues['_role']=newPropertyValues.get('role',obj.role)
# The real states are needed also, as the states entry might be filtered.
newPropertyValues['_states']=obj.states
text=getSpeechTextForProperties(reason,**newPropertyValues)
if text:
speakText(text,index=index)
def speakObject(obj,reason=REASON_QUERY,index=None):
isEditable=bool(obj.role==controlTypes.ROLE_EDITABLETEXT or controlTypes.STATE_EDITABLE in obj.states)
allowProperties={'name':True,'role':True,'states':True,'value':True,'description':True,'keyboardShortcut':True,'positionString':True}
if not config.conf["presentation"]["reportObjectDescriptions"]:
allowProperties["description"]=False
if not config.conf["presentation"]["reportKeyboardShortcuts"]:
allowProperties["keyboardShortcut"]=False
if not config.conf["presentation"]["reportObjectPositionInformation"]:
allowProperties["positionString"]=False
if isEditable:
allowProperties['value']=False
speakObjectProperties(obj,reason=reason,index=index,**allowProperties)
<CHANGES>
if isEditable:
<CHANGEE>
info=obj.makeTextInfo(textHandler.POSITION_SELECTION)
if not info.isCollapsed:
speakMessage(_("selected %s")%info.text)
else:
info.expand(textHandler.UNIT_READINGCHUNK)
speakMessage(info.text)
def speakText(text,index=None,wait=False,reason=REASON_MESSAGE):
"""Speaks some given text."""
"""This function will not speak if L{speechMode} is false."""
"""@param text: the message to speak"""
"""@type text: string"""
"""@param wait: if true, the function will not return until the text has finished being spoken. If false, the function will return straight away."""
<FILEE>
<SCANS>))
index=count+1
if globalVars.log.getEffectiveLevel() <= logging.INFO: globalVars.log.info("Speaking \"%s\""%char)
getSynth().speakText(char,index=index)
if uppercase and config.conf["speech"][getSynth().name]["raisePitchForCapitals"]:
getSynth().pitch=oldPitch
while textLength>1 and globalVars.keyCounter==lastKeyCount and (isPaused or getLastSpeechIndex()!=index):
time.sleep(0.05)
api.processPendingEvents()
queueHandler.flushQueue(queueHandler.eventQueue)
if globalVars.keyCounter!=lastKeyCount:
break
if uppercase and config.conf["speech"][getSynth().name]["beepForCapitals"]:
tones.beep(2000,50)
def speakObjectProperties(obj,reason=REASON_QUERY,index=None,**allowedProperties):
global beenCanceled
del globalXMLFieldStack[:]
if speechMode==speechMode_off:
return
elif speechMode==speechMode_beeps:
tones.beep(config.conf["speech"]["beepSpeechModePitch"],speechMode_beeps_ms)
return
if isPaused:
cancelSpeech()
beenCanceled=False
#Fetch the values for all wanted properties
newPropertyValues={}
for name,value in allowedProperties.iteritems():
if value:
newPropertyValues[name]=getattr(obj,name)
#Fetch the cached properties and update them with the new ones
oldCachedPropertyValues=getattr(obj,'_speakObjectPropertiesCache',{}).copy()
cachedPropertyValues=oldCachedPropertyValues.copy()
cachedPropertyValues.update(newPropertyValues)
obj._speakObjectPropertiesCache=cachedPropertyValues
#If we should only cache we can stop here
if reason==REASON_ONLYCACHE:
return
#If only speaking change, then filter out all values that haven't changed
if reason==REASON_CHANGE:
for name in set(newPropertyValues)&set(oldCachedPropertyValues):
if newPropertyValues[name]==oldCachedPropertyValues[name]:
del newPropertyValues[name]
elif name=="states": #states need specific handling
oldStates=oldCachedPropertyValues[name]
newStates=newPropertyValues[name]
newPropertyValues['
<FILEB>
<CHANGES>
yscrollcommand=vscrollbar.set, width=240)
<CHANGEE>
<FILEE>
<FILEB>
if not s:
return True
try:
int(s)
return True
except ValueError:
return False
class VerticalScrolledFrame(Frame):
"""A pure Tkinter vertically scrollable frame."""
"""* Use the 'interior' attribute to place widgets inside the scrollable frame"""
"""* Construct and pack/place/grid normally"""
"""* This frame only allows vertical scrolling"""
def __init__(self, parent, *args, **kw):
Frame.__init__(self, parent, *args, **kw)
# create a canvas object and a vertical scrollbar for scrolling it
vscrollbar = Scrollbar(self, orient=VERTICAL)
vscrollbar.pack(fill=Y, side=RIGHT, expand=FALSE)
canvas = Canvas(self, bd=0, highlightthickness=0,
<CHANGES>
yscrollcommand=vscrollbar.set)
<CHANGEE>
canvas.pack(side=LEFT, fill=BOTH, expand=TRUE)
vscrollbar.config(command=canvas.yview)
# reset the view
canvas.xview_moveto(0)
canvas.yview_moveto(0)
# create a frame inside the canvas which will be scrolled with it
self.interior = interior = Frame(canvas)
interior_id = canvas.create_window(0, 0, window=interior, anchor=NW)
# track changes to the canvas and frame width and sync them,
# also updating the scrollbar
def _configure_interior(event):
# update the scrollbars to match the size of the inner frame
<FILEE>
<SCANS>Changes(self):
"Dynamically apply configuration changes"
winInstances = self.parent.instance_dict.keys()
for instance in winInstances:
instance.ResetColorizer()
instance.ResetFont()
instance.set_notabs_indentwidth()
instance.ApplyKeybindings()
instance.reset_help_menu_entries()
def Cancel(self):
self.destroy()
def Ok(self):
self.Apply()
self.destroy()
def Apply(self):
self.DeactivateCurrentConfig()
self.SaveAllChangedConfigs()
self.ActivateConfigChanges()
def Help(self):
page = self.tabPages._current_page
view_text(self, title='Help for IDLE preferences',
text=help_common+help_pages.get(page, ''))
def CreatePageExtensions(self):
"""Part of the config dialog used for configuring IDLE extensions."""
"""This code is generic - it works for any and all IDLE extensions."""
"""IDLE extensions save their configuration options using idleConf."""
"""This code reads the current configuration using idleConf, supplies a"""
"""GUI interface to change the configuration values, and saves the"""
"""changes using idleConf."""
"""Not all changes take effect immediately - some may require restarting IDLE."""
"""This depends on each extension's implementation."""
"""All values are treated as text, and it is up to the user to supply"""
"""reasonable values. The only exception to this are the 'enable*' options,"""
"""which are boolean, and can be toggled with an True/False button."""
parent = self.parent
frame = self.tabPages.pages['Extensions'].frame
self.ext_defaultCfg = idleConf.defaultCfg['extensions']
self.ext_userCfg = idleConf.userCfg['extensions']
self.is_int = self.register(is_int)
self.load_extensions()
# create widgets - a listbox shows all available extensions, with the
# controls for the extension selected in the listbox to the right
self.extension_names = StringVar(self)
frame.rowconfigure(0, weight=1)
frame.columnconfigure(2, weight=1)
self.extension_list = Listbox(frame, listvariable=self.extension_names,
selectmode='browse')
self.extension_list.bind('<<ListboxSelect>>', self.extension_selected)
scroll = Scrollbar(frame, command=self.extension_list.yview)
self.extension_list.yscrollcommand=scroll.set
self.details_frame = LabelFrame(frame, width=250, height=250)
self.extension_list.grid(column=0, row=0, sticky='nws')
scroll.grid(column=1, row=0, sticky='ns')
self.details_frame.grid(column=2, row=0, sticky='nsew', padx=[10, 0])
frame.configure(padx
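# A hedged illustration of the idleConf lookup described in the CreatePageExtensions
# docstrings above: extension options live under the 'extensions' config type, and
# 'enable*' options read back as booleans. 'AutoComplete' is only an example name here.
from idlelib.configHandler import idleConf

enabled = idleConf.GetOption('extensions', 'AutoComplete', 'enable',
                             type='bool', default=True)
print enabled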
<FILEB>
<CHANGES>
assert_greater(adjusted_rand_score(y_true, y_pred), 0.9)
<CHANGEE>
<FILEE>
<FILEB>
labels = sp.fit(X).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
labels = sp.fit(X).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
def test_discretize(seed=36):
# Test the discretize using a noise assignment matrix
LB = LabelBinarizer()
for n_sample in [50, 100, 150, 500]:
for n_class in range(2, 10):
# random class labels
random_state = np.random.RandomState(seed)
y_true = random_state.random_integers(0, n_class, n_sample)
y_true = np.array(y_true, np.float)
# noise class assignment matrix
y_true_noisy = (LB.fit_transform(y_true)
+ 0.1 * random_state.randn(n_sample, n_class + 1))
y_pred = discretize(y_true_noisy)
<CHANGES>
assert_equal(adjusted_rand_score(y_true, y_pred), 1)
<CHANGEE>
<FILEE>
<SCANS>
labels = spectral_clustering(S, n_clusters=len(centers),
random_state=0, eigen_solver="lobpcg")
# We don't care too much that it's good, just that it *worked*.
# There does have to be some lower limit on the performance though.
assert_greater(np.mean(labels == true_labels), .3)
def test_spectral_amg_mode():
# Test the amg mode of SpectralClustering
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
try:
from pyamg import smoothed_aggregation_solver
amg_loaded = True
except ImportError:
amg_loaded = False
if amg_loaded:
labels = spectral_clustering(S, n_clusters=len(centers),
random_state=0, eigen_solver="amg")
# We don't care too much that it's good, just that it *worked*.
# There does have to be some lower limit on the performance though.
assert_greater(np.mean(labels == true_labels), .3)
else:
assert_raises(ValueError, spectral_embedding, S,
n_components=len(centers),
random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
# Test that SpectralClustering fails with an unknown mode set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, eigen_solver="<unknown>")
def test_spectral_clustering_sparse():
# We need a large matrice, or the lobpcg solver will fallback to its
# non-sparse and buggy mode
S = np.array([[1, 5, 2, 2, 1, 0, 0, 0, 0, 0],
[5, 1, 3, 2, 1, 0, 0, 0, 0, 0],
[2, 3, 1, 1, 1, 0, 0, 0,
<FILEB>
<CHANGES>
terminated_at=utils.utcnow())
<CHANGEE>
<FILEE>
<FILEB>
host = instance['host']
if host:
self._cast_compute_message('terminate_instance', context,
instance_id, host)
else:
terminate_volumes(self.db, context, instance_id)
self.db.instance_destroy(context, instance_id)
@scheduler_api.reroute_compute("stop")
def stop(self, context, instance_id):
"""Stop an instance."""
LOG.debug(_("Going to try to stop %s"), instance_id)
instance = self._get_instance(context, instance_id, 'stopping')
if not _is_able_to_shutdown(instance, instance_id):
return
self.update(context,
instance['id'],
state_description='stopping',
state=power_state.NOSTATE,
<CHANGES>
terminated_at=datetime.datetime.utcnow())
<CHANGEE>
host = instance['host']
if host:
self._cast_compute_message('stop_instance', context,
instance_id, host)
def start(self, context, instance_id):
"""Start an instance."""
LOG.debug(_("Going to try to start %s"), instance_id)
instance = self._get_instance(context, instance_id, 'starting')
if instance['state_description'] != 'stopped':
_state_description = instance['state_description']
LOG.warning(_("Instance %(instance_id)s is not "
"stopped(%(_state_description)s)") % locals())
<FILEE>
<SCANS># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles all requests relating to instances (guest vms)."""
import eventlet
import re
import time
from nova import db
from nova import exception
from nova import flags
import nova.image
from nova import log as logging
from nova import network
from nova import quota
from nova import rpc
from nova import utils
from nova import volume
from nova.compute import instance_types
from nova.compute import power_state
from nova.compute.utils import terminate_volumes
from nova.scheduler import api as scheduler_api
from nova.db import base
LOG = logging.getLogger('nova.compute.api')
FLAGS = flags.FLAGS
flags.DECLARE('vncproxy_topic', 'nova.vnc')
flags.DEFINE_integer('find_host_timeout', 30,
'Timeout after NN seconds when looking for a host.')
def generate_default_hostname(instance_id):
"""Default function to generate a hostname given an instance reference."""
return str(instance_id)
def _is_able_to_shutdown(instance, instance_id):
states = {'terminating': "Instance %s is already being terminated",
'migrating': "Instance %s is being migrated",
'stopping': "Instance %s is being stopped"}
msg = states.get(instance['state_description'])
if msg:
LOG.warning(_(msg), instance_id)
return False
return True
class API(base.Base):
"""API for interacting with the compute manager."""
def __init__(self, image_service=None, network_api=None,
volume_api=None, hostname_factory=generate_default_hostname,
**kwargs):
self.image_service = image_service or \
nova.image.get_default_image_service()
if not network_api:
network_api = network.API()
self.network_api = network_api
if not volume_api:
volume_api = volume.API()
self.volume_api = volume_api
self.hostname_factory = hostname_factory
super(API, self).__init__(**kwargs)
def get_network_topic(self, context, instance_id):
<FILEB>
<CHANGES>
if (a - b).is_integer and (a - b).is_negative == False:
<CHANGEE>
<FILEE>
<FILEB>
return -1
l1 = list(bucket[mod])
l2 = list(obucket[mod])
l1.sort()
l2.sort()
for i, j in zip(l1, l2):
diff += abs(i - j)
return diff
def _is_suitable_origin(self):
"""Decide if ``self`` is a suitable origin."""
"""A function is a suitable origin iff:"""
"""* none of the ai equals bj + n, with n a non-negative integer"""
"""* none of the ai is zero"""
"""* none of the bj is a non-positive integer"""
"""Note that this gives meaningful results only when none of the indices"""
"""are symbolic."""
for a in self.ap:
for b in self.bq:
<CHANGES>
if (a - b).is_integer and (a < b) == False:
<CHANGEE>
return False
for a in self.ap:
if a == 0:
return False
for b in self.bq:
if b.is_integer and b.is_nonpositive:
return False
return True
class G_Function(Expr):
"""A Meijer G-function. """
def __new__(cls, an, ap, bm, bq):
obj = super(G_Function, cls).__new__(cls)
<FILEE>
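# A standalone, hedged restatement of the origin-suitability checks documented above,
# written against plain lists of sympy numbers rather than the index objects the real
# _is_suitable_origin method works on; the names _suitable, ap and bq are illustrative.
from sympy import Rational, sympify

def _suitable(ap, bq):
    ap = [sympify(a) for a in ap]
    bq = [sympify(b) for b in bq]
    for a in ap:
        for b in bq:
            diff = a - b
            # an upper index must not exceed a lower index by a non-negative integer
            if diff.is_integer and diff.is_nonnegative:
                return False
    if any(a == 0 for a in ap):
        return False
    if any(b.is_integer and b.is_nonpositive for b in bq):
        return False
    return True

print _suitable([Rational(1, 2)], [Rational(3, 2)])   # True
print _suitable([Rational(5, 2)], [Rational(3, 2)])   # False: 5/2 - 3/2 = 1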
<SCANS> - _x), _x)
#print n
self._poly = Poly((m - n)/b0, _x)
def __str__(self):
return '<Increment lower a index #%s of %s, %s, %s, %s.>' % (self._i,
self._an, self._ap, self._bm, self._bq)
class ReduceOrder(Operator):
"""Reduce Order by cancelling an upper and a lower index. """
def __new__(cls, ai, bj):
"""For convenience if reduction is not possible, return None. """
ai = sympify(ai)
bj = sympify(bj)
n = ai - bj
if not n.is_Integer or n < 0:
return None
if bj.is_integer and bj <= 0 and bj + n - 1 >= 0:
return None
self = Operator.__new__(cls)
p = S(1)
for k in xrange(n):
p *= (_x + bj + k)/(bj + k)
self._poly = Poly(p, _x)
self._a = ai
self._b = bj
return self
@classmethod
def _meijer(cls, b, a, sign):
"""Cancel b + sign*s and a + sign*s"""
"""This is for meijer G functions. """
b = sympify(b)
a = sympify(a)
n = b - a
if n.is_negative or not n.is_Integer:
return None
self = Operator.__new__(cls)
p = S(1)
for k in xrange(n):
p *= (sign*_x + a + k)
self._poly = Poly(p, _x)
if sign == -1:
self._a = b
self._b = a
else:
self._b = Add(1, a - 1, evaluate=False)
self._a = Add(1, b - 1, evaluate=False)
return self
@classmethod
def meijer_minus(cls, b, a):
return cls._meijer(b, a, -1)
@classmethod
def meijer_plus(cls, a, b):
return cls._meijer(1 - a, 1 - b, 1)
def __str__(self):
return '<Reduce order by cancelling upper %s with lower %s.>' % \
(self._a, self._b)
def _reduce_order(ap, bq, gen, key):
"""Order reduction algorithm used in Hypergeometric and Meijer G """
ap = list(ap)
bq = list(bq)
ap.sort(key=key)
bq.sort(key=key)
nap = []
# we will edit bq in place
operators = []
for a in ap:
op = None
for i in xrange(len(bq)):
op = gen(a, bq[i])
if op is not None:
bq.pop(i)
break
if op is None:
nap.append(a
<FILEB>
<CHANGES>
raise osv.except_osv(_('Already Reconciled'), _('Already Reconciled'))
<CHANGEE>
<FILEE>
<FILEB>
def onchange_account_id(self, cr, uid, ids, account_id=False):
val = {}
if account_id:
tax_ids = self.pool.get('account.account').browse(cr, uid, account_id).tax_ids
val['account_tax_id'] = tax_ids and tax_ids[0].id or False
return {'value':val}
#
# type: the type of reconciliation (no logic behind this field, for info)
#
# writeoff; entry generated for the difference between the lines
#
def reconcile_partial(self, cr, uid, ids, type='auto', context={}):
merges = []
unmerge = []
total = 0.0
merges_rec = []
for line in self.browse(cr, uid, ids, context):
if line.reconcile_id:
<CHANGES>
raise _('Already Reconciled')
<CHANGEE>
if line.reconcile_partial_id:
for line2 in line.reconcile_partial_id.line_partial_ids:
if not line2.reconcile_id:
merges.append(line2.id)
total += (line2.debit or 0.0) - (line2.credit or 0.0)
merges_rec.append(line.reconcile_partial_id.id)
else:
unmerge.append(line.id)
total += (line.debit or 0.0) - (line.credit or 0.0)
if not total:
res = self.reconcile(cr, uid, merges+unmerge, context=context)
return res
<FILEE>
<SCANS>update_journal_check(cr, uid, context['journal_id'], context['period_id'], context)
move_id = vals.get('move_id', False)
journal = self.pool.get('account.journal').browse(cr, uid, context['journal_id'])
if not move_id:
if journal.centralisation:
# use the first move ever created for this journal and period
cr.execute('select id, state, name from account_move where journal_id=%s and period_id=%s order by id limit 1', (context['journal_id'],context['period_id']))
res = cr.fetchone()
if res:
if res[1] != 'draft':
raise osv.except_osv(_('UserError'),
_('The account move (%s) for centralisation ' \
'has been confirmed!') % res[2])
vals['move_id'] = res[0]
if not vals.get('move_id', False):
if journal.sequence_id:
#name = self.pool.get('ir.sequence').get_id(cr, uid, journal.sequence_id.id)
v = {
'date': vals.get('date', time.strftime('%Y-%m-%d')),
'period_id': context['period_id'],
'journal_id': context['journal_id']
}
move_id = self.pool.get('account.move').create(cr, uid, v, context)
vals['move_id'] = move_id
else:
raise osv.except_osv(_('No piece number !'), _('Can not create an automatic sequence for this piece !\n\nPut a sequence in the journal definition for automatic numbering or create a sequence manually for this piece.'))
else:
if 'date' in vals:
self.pool.get('account.move').write(cr, uid, [move_id], {'date':vals['date']}, context=context)
del vals['date']
ok = not (journal.type_control_ids or journal.account_control_ids)
if ('account_id' in vals):
account = account_obj.browse(cr, uid, vals['account_id'])
if journal.type_control_ids:
type = account.user_type
for t in journal.type_control_ids:
if type==t.code:
ok = True
break
if journal.account_control_ids and not ok:
for a in journal.account_control_ids:
if a.id==vals['account_id']:
ok = True
break
if (account.currency_id) and 'amount_currency' not in vals:
vals['currency_id'] = account.currency_id.id
cur_obj = self.pool.get('res.currency')
<FILEB>
<CHANGES>
raise errors.MissingCommandlineFlag(msg)
<CHANGEE>
<FILEE>
<FILEB>
account_storage.save(acc)
return acc, acme
def perform_registration(acme, config):
"""Actually register new account, trying repeatedly if there are email"""
"""problems"""
""":param .IConfig config: Client configuration."""
""":param acme.client.Client client: ACME client object."""
""":returns: Registration Resource."""
""":rtype: `acme.messages.RegistrationResource`"""
""":raises .UnexpectedUpdate:"""
try:
return acme.register(messages.NewRegistration.from_data(email=config.email))
except messages.Error as e:
if e.typ == "urn:acme:error:invalidEmail":
if config.noninteractive_mode:
msg = ("The email you specified was unable to be verified "
"by acme. Please ensure it is a valid email and "
"attempt registration again.")
<CHANGES>
raise erros.MissingCommandlineFlag(msg)
<CHANGEE>
else:
config.namespace.email = display_ops.get_email(invalid=True)
return perform_registration(acme, config)
else:
raise
class Client(object):
"""ACME protocol client."""
""":ivar .IConfig config: Client configuration."""
""":ivar .Account account: Account registered with `register`."""
""":ivar .AuthHandler auth_handler: Authorizations handler that will"""
"""dispatch DV challenges to appropriate authenticators"""
"""(providing `.IAuthenticator` interface)."""
<FILEE>
<SCANS> self.account.key)
self.acme = acme
if auth is not None:
self.auth_handler = auth_handler.AuthHandler(
auth, self.acme, self.account)
else:
self.auth_handler = None
def obtain_certificate_from_csr(self, domains, csr,
typ=OpenSSL.crypto.FILETYPE_ASN1, authzr=None):
"""Obtain certificate."""
"""Internal function with precondition that `domains` are"""
"""consistent with identifiers present in the `csr`."""
""":param list domains: Domain names."""
""":param .util.CSR csr: DER-encoded Certificate Signing"""
"""Request. The key used to generate this CSR can be different"""
"""than `authkey`."""
""":param list authzr: List of"""
""":class:`acme.messages.AuthorizationResource`"""
""":returns: `.CertificateResource` and certificate chain (as"""
"""returned by `.fetch_chain`)."""
""":rtype: tuple"""
if self.auth_handler is None:
msg = ("Unable to obtain certificate because authenticator is "
"not set.")
logger.warning(msg)
raise errors.Error(msg)
if self.account.regr is None:
raise errors.Error("Please register with the ACME server first.")
logger.debug("CSR: %s, domains: %s", csr, domains)
if authzr is None:
authzr = self.auth_handler.get_authorizations(domains)
certr = self.acme.request_issuance(
jose.ComparableX509(
OpenSSL.crypto.load_certificate_request(typ, csr.data)),
authzr)
return certr, self.acme.fetch_chain(certr)
def obtain_certificate(self, domains):
"""Obtains a certificate from the ACME server."""
"""`.register` must be called before `.obtain_certificate`"""
""":param list domains: domains to get a certificate"""
""":returns: `.CertificateResource`, certificate chain (as"""
"""returned by `.fetch_chain`), and newly generated private key"""
"""(`.util.Key`) and DER-encoded Certificate Signing Request"""
"""(`.util.CSR`)."""
""":rtype: tuple"""
authzr = self.auth_handler.get_authorizations(
domains,
self.config.allow_subset_of_names)
auth_domains = set(a.body.identifier.value for a in authzr)
domains = [d for d in domains if d in auth_domains]
# Create CSR from names
key = crypto_util.init_save_key(
self.config.rsa_key_size, self.config.key_dir)
csr = crypto_util.init_save_csr(key, domains, self.config.csr_dir)
return (self.obtain_certificate_from_csr(domains, csr, authzr=authzr)
+ (key, csr))
def obtain_and_enroll_certificate(self,
<FILEB>
<CHANGES>
self.scores, _ = self.inference(
<CHANGEE>
<FILEE>
<FILEB>
d = tf.reduce_sum(
tf.reshape(a, [-1, FLAGS.max_sentence_len, 1, 1]) * hidden,
[1, 2])
ds = tf.reshape(d, [-1, self.numHidden * 2])
scores = tf.nn.xw_plus_b(ds, self.clfier_softmax_W,
self.clfier_softmax_b)
return scores, length
else:
raise ValueError('model must either be clfier or ner')
def ner_loss(self, ner_cX, ner_Y):
P, sequence_length = self.inference(ner_cX, model='ner')
log_likelihood, self.transition_params = tf.contrib.crf.crf_log_likelihood(
P, ner_Y, sequence_length)
loss = tf.reduce_mean(-log_likelihood)
regularization_loss = tf.add_n(
tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
return loss + regularization_loss * FLAGS.l2_reg_lambda
def clfier_loss(self, clfier_cX, clfier_Y, entity_info):
<CHANGES>
self.scores = self.inference(
<CHANGEE>
clfier_cX, model='clfier', entity_info=entity_info, rnn_reuse=True)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.scores, labels=clfier_Y)
loss = tf.reduce_mean(cross_entropy, name='cross_entropy')
regularization_loss = tf.add_n(
tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
normed_embedding = tf.nn.l2_normalize(self.common_id_embedding, dim=1)
similarity_matrix = tf.matmul(normed_embedding,
tf.transpose(normed_embedding, [1, 0]))
fro_norm = tf.reduce_sum(tf.nn.l2_loss(similarity_matrix))
final_loss = loss + regularization_loss * FLAGS.l2_reg_lambda + fro_norm * FLAGS<SCANS>
ner_total_loss, var_list=clfier_seperate_list)
sv = tf.train.Supervisor(graph=graph, logdir=FLAGS.ner_clfier_log_dir)
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.25)
with sv.managed_session(
master='',
config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
# actual training loop
training_steps = FLAGS.train_steps
for step in range(training_steps):
if sv.should_stop():
break
try:
if step < FLAGS.joint_steps:
_, trainsMatrix = sess.run(
[ner_train_op, model.transition_params])
else:
_, trainsMatrix = sess.run(
[ner_seperate_op, model.transition_params])
# for debugging and learning purposes, see how the loss gets decremented thru training steps
if (step + 1) % 10 == 0:
print(
"[%d] NER loss: [%r] Sentiment Classification loss: [%r]"
% (step + 1, sess.run(ner_total_loss),
sess.run(clfier_total_loss)))
if (step + 1) % 20 == 0:
entity_infos = ner_test_evaluate(
sess, ner_test_unary_score,
ner_test_sequence_length, trainsMatrix,
model.inp_c, ner_tcX, ner_tY)
tentity_info = entity_to_common(entity_infos)
clfier_test_evaluate(
sess, test_clfier_score, model.inp_c, entity_info,
clfier_tcX, clfier_tY, tentity_info)
if step < FLAGS.joint_steps:
_ = sess.run([clfier_train_op])
else:
_ = sess.run([clfier_seperate_op])
except KeyboardInterrupt, e:
sv.saver.save(
sess,
FLAGS.ner_clfier_log_dir + '/model',
global_step=(step + 1))
<FILEB>
<CHANGES>
self.am = event.mouse_region_x, event.mouse_region_y
<CHANGEE>
<FILEE>
<FILEB>
context.object.modifiers["Solidify"].name = "CT_SOLIDIFY"
context.object.modifiers["CT_SOLIDIFY"].thickness = 0.1
Selection_Restore(self)
# Help display
if event.type == context.scene.Key_Help and event.value == 'PRESS':
self.AskHelp = not self.AskHelp
# Instantiate object
if event.type == context.scene.Key_Instant and event.value == 'PRESS':
self.Instantiate = not self.Instantiate
# Close polygonal shape
if event.type == context.scene.Key_Close and event.value == 'PRESS':
if self.CreateMode:
self.Closed = not self.Closed
if event.type == context.scene.Key_Apply and event.value == 'PRESS':
self.DontApply = not self.DontApply
# Scale object
if event.type == context.scene.Key_Scale and event.value == 'PRESS':
if self.ObjectScale == False:
<CHANGES>
elf.am = event.mouse_region_x, event.mouse_regiony
<CHANGEE>
self.ObjectScale = True
# Grid : Add column
if event.type == 'UP_ARROW' and event.value == 'PRESS':
self.nbcol += 1
update_grid(self, context)
# Grid : Add row
elif event.type == 'RIGHT_ARROW' and event.value == 'PRESS':
self.nbrow += 1
update_grid(self, context)
# Grid : Delete column
elif event.type == 'DOWN_ARROW' and event.value == 'PRESS':
self.nbcol -= 1
<FILEE>
<SCANS>
# Restore the previously active object
context.scene.objects.active = SavActive
# Picking (template)
def Picking(context, event):
# get the context arguments
scene = context.scene
region = context.region
rv3d = context.region_data
coord = event.mouse_region_x, event.mouse_region_y
# get the ray from the viewport and mouse
view_vector = view3d_utils.region_2d_to_vector_3d(region, rv3d, coord)
ray_origin = view3d_utils.region_2d_to_origin_3d(region, rv3d, coord)
ray_target = ray_origin + view_vector
def visible_objects_and_duplis():
for obj in context.visible_objects:
if obj.type == 'MESH':
yield (obj, obj.matrix_world.copy())
if obj.dupli_type != 'NONE':
obj.dupli_list_create(scene)
for dob in obj.dupli_list:
obj_dupli = dob.object
if obj_dupli.type == 'MESH':
yield (obj_dupli, dob.matrix.copy())
obj.dupli_list_clear()
def obj_ray_cast(obj, matrix):
# get the ray relative to the object
matrix_inv = matrix.inverted()
ray_origin_obj = matrix_inv * ray_origin
ray_target_obj = matrix_inv * ray_target
ray_direction_obj = ray_target_obj - ray_origin_obj
# cast the ray
success, location, normal, face_index = obj.ray_cast(ray_origin_obj, ray_direction_obj)
if success:
return location, normal, face_index
else:
return None, None, None
# cast rays and find the closest object
best_length_squared = -1.0
best_obj = None
# cast rays and find the closest object
for obj, matrix in visible_objects_and_duplis():
if obj.type == 'MESH':
hit, normal, face_index = obj_ray_cast(obj, matrix)
if hit is not None:
hit_world = matrix * hit
length_squared = (hit_world - ray_origin).length_squared
if best_obj is None or length_squared < best_length_squared:
scene.cursor_location = hit_world
best_length_squared = length_squared
best_obj = obj
else:
if best_obj is None:
depthLocation = region_2d_to_vector_3d(region, rv3d, coord)
loc = region_2d_to_location_3d(region, rv3d, coord, depthLocation
<FILEB>
<CHANGES>
return [(token, '', 0)]
<CHANGEE>
<FILEE>
<FILEB>
regex_name = self.regexlist[regex][1]
longest_match = (result, regex_name, new_priority)
if len(result) == len(longest_match[0]):
new_priority = self.regexlist[regex][0]
old_priority = longest_match[2]
if new_priority < old_priority: # use token with higher priority (smaller numbers have higher priority)
regex_name = self.regexlist[regex][1]
longest_match = (result, regex_name, new_priority)
if longest_match[0] != "":
any_match_found = True
remaining = remaining[len(longest_match[0]):]
matches.append(longest_match)
else:
matches.append((remaining, ""))
break
if any_match_found:
return matches
else:
<CHANGES>
return None
<CHANGEE>
<FILEE>
<SCANS>import re
class TokenLexer(object):
def __init__(self, regexlist):
self.regexlist = regexlist
self.compiled_regexes = {}
for regex in self.regexlist:
self.compiled_regexes[regex] = re.compile(regex)
def match(self, token):
matches = []
remaining = token
any_match_found = False
while remaining != "":
longest_match = ("", "", 999999)
for regex in self.regexlist:
m = self.compiled_regexes[regex].match(remaining)
if m:
result = m.group(0)
if len(result) > len(longest_match[0]):
new_priority = self.regexlist[regex][0]
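# A small usage sketch of TokenLexer, under the assumption (suggested by the lookups in
# match()) that regexlist maps a regex pattern to a (priority, name) tuple.
lexer = TokenLexer({
    r'[0-9]+': (1, 'number'),
    r'[a-z]+': (2, 'word'),
})
print lexer.match('abc123')
# the longest match wins at each step, so this yields
# [('abc', 'word', 2), ('123', 'number', 1)]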
<FILEB>
<CHANGES>
context, instance, legacy=False)
<CHANGEE>
<FILEE>
<FILEB>
cleaned_keys = dict(
key_data=instance.key_data,
auto_disk_config=instance.auto_disk_config)
instance.key_data = None
instance.auto_disk_config = False
return cleaned_keys
def _unshelve_instance_key_restore(self, instance, keys):
"""Restore previously scrubbed keys before saving the instance."""
instance.update(keys)
def _unshelve_instance(self, context, instance, image):
self._notify_about_instance_usage(context, instance, 'unshelve.start')
compute_info = self._get_compute_info(context, self.host)
instance.task_state = task_states.SPAWNING
instance.node = compute_info['hypervisor_hostname']
instance.host = self.host
instance.save()
network_info = self._get_instance_nw_info(context, instance)
bdms = self.conductor_api.block_device_mapping_get_all_by_instance(
<CHANGES>
context, instance)
<CHANGEE>
block_device_info = self._prep_block_device(context, instance, bdms)
scrubbed_keys = self._unshelve_instance_key_scrub(instance)
try:
self.driver.spawn(context, instance, image, injected_files=[],
admin_password=None,
network_info=network_info,
block_device_info=block_device_info)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_('Instance failed to spawn'), instance=instance)
if image:
image_service = glance.get_default_image_service()
<FILEE>
<SCANS>get_all_by_instance(self, context, instance,
legacy=True):
capi = self._compute.conductor_api
return capi.block_device_mapping_get_all_by_instance(context, instance,
legacy=legacy)
def block_device_mapping_update(self, context, bdm_id, values):
return self._compute.conductor_api.block_device_mapping_update(
context, bdm_id, values)
class ComputeManager(manager.SchedulerDependentManager):
"""Manages the running instances from creation to destruction."""
RPC_API_VERSION = '2.48'
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
self.virtapi = ComputeVirtAPI(self)
self.network_api = network.API()
self.volume_api = volume.API()
self._last_host_check = 0
self._last_bw_usage_poll = 0
self._last_vol_usage_poll = 0
self._last_info_cache_heal = 0
self._last_bw_usage_cell_update = 0
self.compute_api = compute.API()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.conductor_api = conductor.API()
self.compute_task_api = conductor.ComputeTaskAPI()
self.is_neutron_security_groups = (
openstack_driver.is_neutron_security_groups())
self.consoleauth_rpcapi = consoleauth.rpcapi.ConsoleAuthAPI()
self.cells_rpcapi = cells_rpcapi.CellsAPI()
self._resource_tracker_dict = {}
super(ComputeManager, self).__init__(service_name="compute",
*args, **kwargs)
# NOTE(russellb) Load the driver last. It may call back into the
# compute manager via the virtapi, so we want it to be fully
# initialized before that happens.
self.driver = driver.load_compute_driver(self.virtapi, compute_driver)
self.use_legacy_block_device_info = \
self.driver.need_legacy_block_device_info
def _get_resource_tracker(self, nodename):
rt = self._resource_tracker_dict.get(nodename)
if not rt:
if not self.driver.node_is_available(nodename):
raise exception.NovaException(
_("%s is not a valid node managed by this "
"compute host.") % nodename)
rt = resource_tracker.ResourceTracker(self.host,
self.driver,
nod
<FILEB>
<CHANGES>
LOG_FORMAT_SIMPLIFIED: SimplifiedSyncLog,
<CHANGEE>
<FILEE>
<FILEB>
"""Just maintains a flat list of case IDs on the phone rather than the case/dependent state"""
"""lists from the SyncLog class."""
log_format = StringProperty(default=LOG_FORMAT_SIMPLIFIED)
case_ids_on_phone = StringListProperty()
def phone_is_holding_case(self, case_id):
"""Whether the phone currently has a case, according to this sync log"""
# todo: if we do this a lot we may want to convert case_ids_on_phone to a memoized set
return case_id in self.case_ids_on_phone
def get_footprint_of_cases_on_phone(self):
return self.case_ids_on_phone
def get_properly_wrapped_sync_log(doc_id):
"""Looks up and wraps a sync log, using the class based on the 'log_format' attribute."""
"""Defaults to the existing legacy SyncLog class."""
doc = SyncLog.get_db().get(doc_id)
return get_sync_log_class_by_format(doc.get('log_format')).wrap(doc)
def get_sync_log_class_by_format(format):
return {
LOG_FORMAT_LEGACY: SyncLog,
<CHANGES>
LOG_FORMAT_CLEAN_OWNERS: CleanOwnerSyncLog,
<CHANGEE>
}.get(format, SyncLog)
class OwnershipCleanlinessFlag(models.Model):
"""Stores whether an owner_id is "clean" aka has a case universe only belonging"""
"""to that ID."""
"""We use this field to optimize restores."""
domain = models.CharField(max_length=100, db_index=True)
owner_id = models.CharField(max_length=100, db_index=True)
is_clean = models.BooleanField(default=False)
last_checked = models.DateTimeField()
hint = models.CharField(max_length=100, null=True, blank=True)
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
<FILEE>
<SCANS> cs and case_id in self.get_footprint_of_cases_on_phone():
return True
return False
def get_state_hash(self):
return CaseStateHash(Checksum(self.get_footprint_of_cases_on_phone()).hexdigest())
def reconcile_cases(self):
"""Goes through the cases expected to be on the phone and reconciles"""
"""any duplicate records."""
"""Return True if any duplicates were found."""
num_cases_on_phone_before = len(self.cases_on_phone)
num_dependent_cases_before = len(self.dependent_cases_on_phone)
self.cases_on_phone = list(set(self.cases_on_phone))
self.dependent_cases_on_phone = list(set(self.dependent_cases_on_phone))
if num_cases_on_phone_before != len(self.cases_on_phone) \
or num_dependent_cases_before != len(self.dependent_cases_on_phone):
self._case_state_map.reset_cache(self)
self._dependent_case_state_map.reset_cache(self)
return True
return False
def __unicode__(self):
return "%s synced on %s (%s)" % (self.user_id, self.date.date(), self.get_id)
class SimplifiedSyncLog(AbstractSyncLog):
"""New, simplified sync log class that is used by ownership cleanliness restore."""
self.last_checked = datetime.utcnow()
super(OwnershipCleanlinessFlag, self).save(force_insert, force_update, using, update_fields)
@classmethod
def get_for_owner(cls, domain, owner_id):
return cls.objects.get_or_create(domain=domain, owner_id=owner_id)[0]
class Meta:
unique_together = [('domain', 'owner_id')]
<FILEB>
<CHANGES>
titleSetting = int(self.plugin.get_setting('titledisplay', unicode))
<CHANGEE>
<FILEE>
<FILEB>
name=channel[Keys.NAME]),
'is_playable': True,
'icon': videobanner if videobanner else logo
}
def getTitleForChannel(self, channel):
titleValues = self.extractTitleValues(channel)
return self.titleBuilder.formatTitle(titleValues)
class TitleBuilder(object):
class Templates(object):
TITLE = "{title}"
STREAMER = "{streamer}"
STREAMER_TITLE = "{streamer} - {title}"
VIEWERS_STREAMER_TITLE = "{viewers} - {streamer} - {title}"
ELLIPSIS = '...'
def __init__(self, PLUGIN, line_length):
self.plugin = PLUGIN
self.line_length = line_length
def formatTitle(self, titleValues):
<CHANGES>
titleSetting = int(self.plugin.get_setting('titledisplay'))
<CHANGEE>
template = self.getTitleTemplate(titleSetting)
for key, value in titleValues.iteritems():
titleValues[key] = self.cleanTitleValue(value)
title = template.format(**titleValues)
return self.truncateTitle(title)
def getTitleTemplate(self, titleSetting):
options = {0: TitleBuilder.Templates.STREAMER_TITLE,
1: TitleBuilder.Templates.VIEWERS_STREAMER_TITLE,
2: TitleBuilder.Templates.TITLE,
3: TitleBuilder.Templates.STREAMER}
return options.get(titleSetting, TitleBuilder.Templates.STREAMER)
def cleanTitleValue(self, value):
<FILEE>
<SCANS>from twitch import Keys
class JsonListItemConverter(object):
def __init__(self, PLUGIN, title_length):
self.plugin = PLUGIN
self.titleBuilder = TitleBuilder(PLUGIN, title_length)
def convertGameToListItem(self, game):
name = game[Keys.NAME].encode('utf-8')
image = game[Keys.LOGO].get(Keys.LARGE, '')
return {'label': name,
'path': self.plugin.url_for('createListForGame',
gameName=name, index='0'),
'icon': image
}
def convertTeamToListItem(self, team):
name = team['name']
return {'label': name,
'path': self.plugin.url_for(endpoint='createListOfTeamStreams',
team=name),
'icon': team.get(Keys.LOGO, '')
}
def convertTeamChannelToListItem(self, teamChannel):
images = teamChannel.get('image', '')
image = '' if not images else images.get('size600', '')
channelname = teamChannel['name']
titleValues = {'streamer': teamChannel.get('display_name'),
'title': teamChannel.get('title'),
'viewers': teamChannel.get('current_viewers')}
title = self.titleBuilder.formatTitle(titleValues)
return {'label': title,
'path': self.plugin.url_for(endpoint='playLive', name=channelname),
'is_playable': True,
'icon': image}
def extractTitleValues(self, channel):
return {'streamer': channel.get(Keys.DISPLAY_NAME,
self.plugin.get_string(34000)),
'title': channel.get(Keys.STATUS,
self.plugin.get_string(34001)),
'viewers': channel.get(Keys.VIEWERS,
self.plugin.get_string(34002))
}
def convertChannelToListItem(self, channel):
videobanner = channel.get(Keys.VIDEO_BANNER, '')
logo = channel.get(Keys.LOGO, '')
return {'label': self.getTitleForChannel(channel),
'path': self.plugin.url_for(endpoint='playLive',
if isinstance(value, basestring):
return unicode(value).replace('\r\n', ' ').strip().encode('utf-8')
else:
return value
def truncateTitle(self, title):
shortTitle = title[:self.line_length]
ending = (title[self.line_length:] and TitleBuilder.Templates.ELLIPSIS)
return shortTitle + ending
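# Illustrative only: a stub standing in for the add-on's real plugin object, just to show
# how TitleBuilder picks a template from the 'titledisplay' setting; the stub's
# get_setting() also accepts the optional converter argument some call sites pass.
class _StubPlugin(object):
    def get_setting(self, key, converter=None):
        return '1'  # 1 -> VIEWERS_STREAMER_TITLE

builder = TitleBuilder(_StubPlugin(), line_length=80)
print builder.formatTitle({'streamer': 'someStreamer',
                           'title': 'Any% speedrun',
                           'viewers': 42})
# -> '42 - someStreamer - Any% speedrun'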
<FILEB>
<CHANGES>
self.assertContains(response, b"Currently")
<CHANGEE>
<FILEE>
<FILEB>
self.client.logout()
def test_inline_file_upload_edit_validation_error_post(self):
"""Test that inline file uploads correctly display prior data (#10002)."""
post_data = {
"name": "Test Gallery",
"pictures-TOTAL_FORMS": "2",
"pictures-INITIAL_FORMS": "1",
"pictures-MAX_NUM_FORMS": "0",
"pictures-0-id": six.text_type(self.picture.id),
"pictures-0-gallery": six.text_type(self.gallery.id),
"pictures-0-name": "Test Picture",
"pictures-0-image": "",
"pictures-1-id": "",
"pictures-1-gallery": str(self.gallery.id),
"pictures-1-name": "Test Picture 2",
"pictures-1-image": "",
}
response = self.client.post('/test_admin/%s/admin_views/gallery/%d/' % (self.urlbit, self.gallery.id), post_data)
<CHANGES>
self.assertTrue(response._container[0].find("Currently:") > -1)
<CHANGEE>
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class AdminInlineTests(TestCase):
urls = "admin_views.urls"
fixtures = ['admin-views-users.xml']
def setUp(self):
self.post_data = {
"name": "Test Name",
"widget_set-TOTAL_FORMS": "3",
"widget_set-INITIAL_FORMS": "0",
"widget_set-MAX_NUM_FORMS": "0",
"widget_set-0-id": "",
"widget_set-0-owner": "1",
<FILEE>
<SCANS>0 selected')
self.assertEqual(resp.context['selection_note_all'], 'All 0 selected')
with self.assertNumQueries(4):
extra = {'q': 'person'}
resp = self.client.get('/test_admin/admin/admin_views/person/', extra)
self.assertEqual(resp.context['selection_note'], '0 of 2 selected')
self.assertEqual(resp.context['selection_note_all'], 'All 2 selected')
# here one more count(*) query will run, because filters were applied
with self.assertNumQueries(5):
extra = {'gender__exact': '1'}
resp = self.client.get('/test_admin/admin/admin_views/person/', extra)
self.assertEqual(resp.context['selection_note'], '0 of 1 selected')
self.assertEqual(resp.context['selection_note_all'], '1 selected')
def test_change_view(self):
for i in self.pks:
response = self.client.get('/test_admin/admin/admin_views/emptymodel/%s/' % i)
if i > 1:
self.assertEqual(response.status_code, 200)
else:
self.assertEqual(response.status_code, 404)
def test_add_model_modeladmin_defer_qs(self):
# Test for #14529. defer() is used in ModelAdmin.get_queryset()
# model has __unicode__ method
self.assertEqual(CoverLetter.objects.count(), 0)
# Emulate model instance creation via the admin
post_data = {
"author": "Candidate, Best",
"_save": "Save",
}
response = self.client.post('/test_admin/admin/admin_views/coverletter/add/',
post_data, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(CoverLetter.objects.count(), 1)
# Message should contain non-ugly model verbose name
self.assertContains(
response,
'<li class="success">The cover letter "Candidate, Best" was added successfully.</li>',
html=True
)
# model has no __unicode__ method
self.assertEqual(ShortMessage.objects.count(), 0)
# Emulate model instance creation via the admin
post_data = {
"content": "What's this SMS thing?",
"_save": "Save",
}
response = self.client.post('/test_admin/admin/admin_views/shortmessage/add/',
post_data, follow=True
<FILEB>
<CHANGES>
pipeline = Pipeline([('vect', CountVectorizer(min_df=1)),
<CHANGEE>
<FILEE>
<FILEB>
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# simulate iterables
train_data = iter(data[1:-1])
test_data = iter([data[0], data[-1]])
# label junk food as -1, the others as +1
y = np.ones(len(data))
y[:6] = -1
y_train = y[1:-1]
y_test = np.array([y[0], y[-1]])
<CHANGES>
pipeline = Pipeline([('vect', CountVectorizer()),
<CHANGEE>
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('l1', 'l2')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# cross-validation doesn't work if the length of the data is not known,
# hence use lists instead of iterators
pred = grid_search.fit(list(train_data), y_train).predict(list(test_data))
assert_array_equal(pred, y_test)
<FILEE>
<SCANS>test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1', min_df=1)
assert_false(tv.fixed_vocabulary)
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
def test_feature_names():
cv = CountVectorizer(max_df=0.5, min_df=1)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equals(set
<FILEB>
<CHANGES>
if Backend.getIdentifiedDbms() in (DBMS.MYSQL, DBMS.HSQL):
<CHANGEE>
<FILEE>
<FILEB>
"""PostgreSQL input: SELECT usename, passwd FROM pg_shadow"""
"""PostgreSQL output: 'HsYIBS'||COALESCE(CAST(usename AS CHARACTER(10000)), ' ')||'KTBfZp'||COALESCE(CAST(passwd AS CHARACTER(10000)), ' ')||'LkhmuP' FROM pg_shadow"""
"""Oracle input: SELECT COLUMN_NAME, DATA_TYPE FROM SYS.ALL_TAB_COLUMNS WHERE TABLE_NAME='USERS'"""
"""Oracle output: 'GdBRAo'||NVL(CAST(COLUMN_NAME AS VARCHAR(4000)), ' ')||'czEHOf'||NVL(CAST(DATA_TYPE AS VARCHAR(4000)), ' ')||'JVlYgS' FROM SYS.ALL_TAB_COLUMNS WHERE TABLE_NAME='USERS'"""
"""Microsoft SQL Server input: SELECT name, master.dbo.fn_varbintohexstr(password) FROM master..sysxlogins"""
"""Microsoft SQL Server output: 'QQMQJO'+ISNULL(CAST(name AS VARCHAR(8000)), ' ')+'kAtlqH'+ISNULL(CAST(master.dbo.fn_varbintohexstr(password) AS VARCHAR(8000)), ' ')+'lpEqoi' FROM master..sysxlogins"""
"""@param query: query string to be processed"""
"""@type query: C{str}"""
"""@return: query string nulled, casted and concatenated"""
"""@rtype: C{str}"""
if unpack:
concatenatedQuery = ""
query = query.replace(", ", ',')
fieldsSelectFrom, fieldsSelect, fieldsNoSelect, fieldsSelectTop, fieldsSelectCase, _, fieldsToCastStr, fieldsExists = self.getFields(query)
castedFields = self.nullCastConcatFields(fieldsToCastStr)
concatenatedQuery = query.replace(fieldsToCastStr, castedFields, 1)
else:
return query
<CHANGES>
if Backend.isDbms(DBMS.MYSQL):
<CHANGEE>
if fieldsExists:
concatenatedQuery = concatenatedQuery.replace("SELECT ", "CONCAT('%s'," % kb.chars.start, 1)
concatenatedQuery += ",'%s')" % kb<SCANS>@return: field string nulled and casted"""
"""@rtype: C{str}"""
nulledCastedField = field
if field:
rootQuery = queries[Backend.getIdentifiedDbms()]
if field.startswith("(CASE") or field.startswith("(IIF") or conf.noCast:
nulledCastedField = field
else:
if not (Backend.isDbms(DBMS.SQLITE) and not isDBMSVersionAtLeast('3')):
nulledCastedField = rootQuery.cast.query % field
if Backend.getIdentifiedDbms() in (DBMS.ACCESS,):
nulledCastedField = rootQuery.isnull.query % (nulledCastedField, nulledCastedField)
else:
nulledCastedField = rootQuery.isnull.query % nulledCastedField
if conf.hexConvert or conf.binaryFields and field in conf.binaryFields.split(','):
nulledCastedField = self.hexConvertField(nulledCastedField)
return nulledCastedField
def nullCastConcatFields(self, fields):
"""Take in input a sequence of fields string and return its processed"""
"""nulled, casted and concatenated fields string."""
"""Examples:"""
"""MySQL input: user,password"""
"""MySQL output: IFNULL(CAST(user AS CHAR(10000)), ' '),'UWciUe',IFNULL(CAST(password AS CHAR(10000)), ' ')"""
"""MySQL scope: SELECT user, password FROM mysql.user"""
"""PostgreSQL input: usename,passwd"""
"""PostgreSQL output: COALESCE(CAST(usename AS CHARACTER(10000)), ' ')||'xRBcZW'||COALESCE(CAST(passwd AS CHARACTER(10000)), ' ')"""
"""PostgreSQL scope: SELECT usename, passwd FROM pg_shadow"""
"""Oracle input: COLUMN_NAME,DATA_TYPE"""
"""Oracle output: NVL(CAST(COLUMN_NAME AS VARCHAR(4000)), ' ')||'UUlHUa'||NVL(CAST(DATA_TYPE AS VARCHAR(4000)), ' ')"""
"""Oracle scope:
<FILEB>
<CHANGES>
from"+info["commit"]["message"]+". \nPlease update, it may resolve some issues!")
<CHANGEE>
<FILEE>
<FILEB>
raise tornado.web.HTTPError(412, "Empty message")
except IOError as e:
cprint("\n")
cprint("I/O error at event writing: ({0}): {1}".format(e.errno, e.strerror))
cprint("\n")
class AutoUpdaterHandler(custom_handlers.APIRequestHandler):
"""* Notify on the home page if the repo is at its latest commit from upstream"""
SUPPORTED_METHODS = ['GET']
@tornado.gen.coroutine
def get(self):
client = tornado.httpclient.AsyncHTTPClient()
response = yield client.fetch("https://api.github.com/repos/owtf/owtf/commits/develop",
user_agent='OWTF')
info = json.loads(response.body)
root_dir = self.get_component("config").RootDir
# now compare the commit_hash with the latest tag
if print_version(root_dir, commit_hash=True) != info["sha"]:
self.write("Seems that your repository is older than the upstream. The lastest commit is \
<CHANGES>
from"+info["commit"]["messsage"]+". \nPlease update, it may resolve some issues!")
<CHANGEE>
else:
self.write('Seems like you are running latest version. Happy Pwning!')
<FILEE>
<SCANS>_METHODS = ['GET']
def get(self):
try:
criteria = dict(self.request.arguments)
criteria["search"] = True
self.write(self.get_component("worklist_manager").search_all(criteria))
except exceptions.InvalidParameterType:
raise tornado.web.HTTPError(400)
class ConfigurationHandler(custom_handlers.APIRequestHandler):
SUPPORTED_METHODS = ('GET', 'PATCH')
def get(self):
filter_data = dict(self.request.arguments)
self.write(self.get_component("db_config").GetAll(filter_data))
def patch(self):
for key, value_list in self.request.arguments.items():
try:
self.get_component("db_config").Update(key, value_list[0])
except exceptions.InvalidConfigurationReference:
raise tornado.web.HTTPError(400)
class ErrorDataHandler(custom_handlers.APIRequestHandler):
SUPPORTED_METHODS = ('GET', 'PATCH', 'DELETE')
def get(self, error_id=None):
if error_id is None:
filter_data = dict(self.request.arguments)
self.write(self.get_component("db_error").GetAll(filter_data))
else:
try:
self.write(self.get_component("db_error").Get(error_id))
except exceptions.InvalidErrorReference:
raise tornado.web.HTTPError(400)
def patch(self, error_id=None):
if error_id is None:
raise tornado.web.HTTPError(400)
if self.request.arguments.get_argument("user_message", default=None):
raise tornado.web.HTTPError(400)
self.get_component("db_error").Update(
error_id,
self.request.arguments.get_argument("user_message"))
def delete(self, error_id=None):
if error_id is None:
raise tornado.web.HTTPError(400)
try:
self.get_component("db_error").Delete(error_id)
except exceptions.InvalidErrorReference:
raise tornado.web.HTTPError(400)
class PlugnhackHandler(custom_handlers.APIRequestHandler):
"""API handler for Plug-n-Hack. Purpose of this handler is to catch"""
"""parameters defining actions (or/and) state that were sent from Plug-n-Hack"""
"""commands invoked in browser, validate them, then send to proxy Plug-n-Hack"""
"""Handler that will process received information and take action corresponding to received information (i.e inject probe into a target, start/stop monitor a target)"""
SUPPORTED_METHODS = ['POST']
# PnH API Handler must accept and send only an action that is a member of VALID_ACTIONS group
VALID_ACTIONS = ["probe","monitor","oracle","startMonitoring","stopMonitoring"]
def post(self):
# Extract useful information from POST request and store it as a dictionary data structure
self.message = dict(self.request.arguments)
# Extract value of url, action and state request parameters
<FILEB>
<CHANGES>
elif re.match("^<[\w-]+>$", text):
<CHANGEE>
<FILEE>
<FILEB>
return "token"
elif text[0:1] == ":":
return "selector"
elif text[-2:] == "()" and not (dfn.get('id') or '').startswith("dom-"):
return "function"
else:
return "dfn"
def determineLinkType(el):
# 1. Look at data-link-type
linkType = treeAttr(el, 'data-link-type')
if linkType:
if linkType in config.linkTypes:
return linkType
die("Unknown link type '{0}' on:\n{1}", linkType, outerHTML(el))
# 2. Introspect on the text
text = textContent(el)
if text[0:1] == "@":
return "at-rule"
<CHANGES>
elif text[0:1] == "<" and text[-1:] == ">":
<CHANGEE>
return "type"
elif text[:1] == u"���" and text[-1:] == u"���":
return "token"
elif text[0:1] == ":":
return "selector"
elif text[-2:] == "()":
return "functionish"
else:
return "dfn"
def classifyDfns(doc):
dfnTypeToPrefix = {v:k for k,v in config.dfnClassToType.items()}
for el in findAll("dfn"):
<FILEE>
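# Illustrative only: the text-introspection rules used by determineLinkType above,
# applied to bare strings instead of document elements, to show what each text shape
# maps to (the token case is left out here).
import re

def _classify(text):
    if text[0:1] == "@":
        return "at-rule"
    elif re.match(r"^<[\w-]+>$", text):
        return "type"
    elif text[0:1] == ":":
        return "selector"
    elif text[-2:] == "()":
        return "functionish"
    return "dfn"

print _classify("@media")     # at-rule
print _classify("<length>")   # type
print _classify(":hover")     # selector
print _classify("calc()")     # functionish
print _classify("length")     # dfn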
<SCANS> doc.lines[i] = match.group(2)
replacements.append({
'start': startLine,
'end': i,
'value': blockTypes[blockType](
lines=doc.lines[startLine+1:i],
tagName=tagName,
firstLine=doc.lines[startLine],
doc=doc)})
else:
# End tag was at the end of line of useful content.
# Trim this line to be only the block content.
doc.lines[i] = match.group(1)
# Put the after-tag content on the next line.
doc.lines.insert(i+1, match.group(2))
replacements.append({
'start': startLine,
'end': i+1,
'value': blockTypes[blockType](
lines=doc.lines[startLine+1:i+1],
tagName=tagName,
firstLine=doc.lines[startLine],
doc=doc)})
tagName = ""
blockType = ""
# Make the replacements, starting from the bottom up so I
# don't have to worry about offsets becoming invalid.
for rep in reversed(replacements):
doc.lines[rep['start']:rep['end']] = rep['value']
def transformPre(lines, tagName, firstLine, **kwargs):
prefix = re.match("\s*", firstLine).group(0)
for (i, line) in enumerate(lines):
# Remove the whitespace prefix from each line.
match = re.match(prefix+"(.*)", line, re.DOTALL)
if match:
lines[i] = match.group(1)
# Use tabs in the source, but spaces in the output,
# because tabs are ginormous in HTML.
# Also, it means lines in processed files will never
# accidentally match a prefix.
lines[i] = lines[i].replace("\t", " ")
lines.insert(0, firstLine)
lines.append("</"+tagName+">")
return lines
def transformPropdef(lines, doc, **kwargs):
ret = ["<table class='propdef'>"]
for (i, line) in enumerate(lines):
match = re.match("\s*([^:]+):\s*(.*)", line)
key = match.group(1)
val = match.group(2)
ret.append("<tr><th>" + key + ":<td>" + val)
ret.append("</table>")
return ret
def transformDescdef(lines, doc, **kwargs):
name = None
descFor = None
ret = []
for (i, line) in enumerate(lines):
match = re.match("\s*([^:]+):\s*(.*)", line)
key = match.group(1).strip()
val = match.group(2).strip()
if key == "Name":
name = val
elif key == "For":
descFor = val
val = "<a at-rule>{0}</a>".format(val)
ret.append("<tr><th>" + key + ":<td>" + val)
ret.append("</table>")
if descFor is
<FILEB>
<CHANGES>
files = [f[1] for f in GetFilesNotInCL()]
<CHANGEE>
<FILEE>
<FILEB>
if not len(change_info._files):
print change_info.name
change_info.Delete()
return 0
@no_args
def CMDnothave():
"""Lists files unknown to Subversion."""
for filename in UnknownFiles():
print "? " + "".join(filename)
return 0
@attrs(usage='<svn options>')
def CMDdiff(args):
"""Diffs all files in the changelist or all files that aren't in a CL."""
files = None
if args:
change_info = ChangeInfo.Load(args.pop(0), GetRepositoryRoot(), True, True)
files = change_info.GetFileNames()
else:
<CHANGES>
files = GetFilesNotInCL()
<CHANGEE>
root = GetRepositoryRoot()
cmd = ['svn', 'diff']
cmd.extend([os.path.join(root, x) for x in files])
cmd.extend(args)
return RunShellWithReturnCode(cmd, print_output=True)[1]
@no_args
def CMDsettings():
"""Prints code review settings for this checkout."""
# Force load settings
GetCodeReviewSetting("UNKNOWN");
del CODEREVIEW_SETTINGS['__just_initialized']
print '\n'.join(("%s: %s" % (str(k), str(v))
<FILEE>
<SCANS> SVN.CaptureInfo('.')
if not svn_info:
ErrorExit("Current checkout is unversioned. Please retry with a versioned "
"directory.")
if len(args) == 2:
f = open(args[1], 'rU')
override_description = f.read()
f.close()
else:
override_description = None
if change_info.issue and not change_info.NeedsUpload():
try:
description = GetIssueDescription(change_info.issue)
except urllib2.HTTPError, err:
if err.code == 404:
# The user deleted the issue in Rietveld, so forget the old issue id.
description = change_info.description
change_info.issue = 0
change_info.Save()
else:
ErrorExit("Error getting the description from Rietveld: " + err)
else:
if override_description:
description = override_description
else:
description = change_info.description
other_files = GetFilesNotInCL()
# Edited files (as opposed to files with only changed properties) will have
# a letter for the first character in the status string.
file_re = re.compile(r"^[a-z].+\Z", re.IGNORECASE)
affected_files = [x for x in other_files if file_re.match(x[0])]
unaffected_files = [x for x in other_files if not file_re.match(x[0])]
separator1 = ("\n---All lines above this line become the description.\n"
"---Repository Root: " + change_info.GetLocalRoot() + "\n"
"---Paths in this changelist (" + change_info.name + "):\n")
separator2 = "\n\n---Paths modified but not in any changelist:\n\n"
text = (description + separator1 + '\n' +
'\n'.join([f[0] + f[1] for f in change_info.GetFiles()]))
if change_info.Exists():
text += (separator2 +
'\n'.join([f[0] + f[1] for f in affected_files]) + '\n')
else:
text += ('\n'.join([f[0] + f[1] for f in affected_files]) + '\n' +
separator2)
text += '\n'.join([f[0] + f[1] for f in unaffected_files]) + '\n'
handle, filename = tempfile.mkstemp(text=True)
os.write(handle, text)
os.close(handle)
if not silent:
os.system(GetEditor() + " " + filename)
result = gclient_utils.FileRead(filename, 'r')
os.remove(filename)
if not result:
return 0
split_result = result.split(separator1, 1)
if len(split_result) != 2:
ErrorExit("Don't modify the text starting with ---!\n\n" + result)
# Update the CL description if it has changed.
new_description = split_result[0]
cl
<FILEB>
<CHANGES>
self.assertContains(response, b"Currently")
<CHANGEE>
<FILEE>
<FILEB>
self.client.logout()
def test_inline_file_upload_edit_validation_error_post(self):
"""Test that inline file uploads correctly display prior data (#10002)."""
post_data = {
"name": "Test Gallery",
"pictures-TOTAL_FORMS": "2",
"pictures-INITIAL_FORMS": "1",
"pictures-MAX_NUM_FORMS": "0",
"pictures-0-id": six.text_type(self.picture.id),
"pictures-0-gallery": six.text_type(self.gallery.id),
"pictures-0-name": "Test Picture",
"pictures-0-image": "",
"pictures-1-id": "",
"pictures-1-gallery": str(self.gallery.id),
"pictures-1-name": "Test Picture 2",
"pictures-1-image": "",
}
response = self.client.post('/test_admin/%s/admin_views/gallery/%d/' % (self.urlbit, self.gallery.id), post_data)
<CHANGES>
self.assertTrue(response._container[0].find("Currently:") > -1)
<CHANGEE>
@override_settings(PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class AdminInlineTests(TestCase):
urls = "admin_views.urls"
fixtures = ['admin-views-users.xml']
def setUp(self):
self.post_data = {
"name": "Test Name",
"widget_set-TOTAL_FORMS": "3",
"widget_set-INITIAL_FORMS": "0",
"widget_set-MAX_NUM_FORMS": "0",
"widget_set-0-id": "",
"widget_set-0-owner": "1",
<FILEE>
<SCANS> popups."""
response = self.client.get('/test_admin/admin/admin_views/person/')
self.assertNotEqual(response.context['cl'].list_editable, ())
response = self.client.get('/test_admin/admin/admin_views/person/?%s' % IS_POPUP_VAR)
self.assertEqual(response.context['cl'].list_editable, ())
def test_pk_hidden_fields(self):
"""Ensure that hidden pk fields aren't displayed in the table body and"""
"""that their corresponding human-readable value is displayed instead."""
"""Note that the hidden pk fields are in fact be displayed but"""
"""separately (not in the table), and only once."""
"""Refs #12475."""
story1 = Story.objects.create(title='The adventures of Guido', content='Once upon a time in Djangoland...')
story2 = Story.objects.create(title='Crouching Tiger, Hidden Python', content='The Python was sneaking into...')
response = self.client.get('/test_admin/admin/admin_views/story/')
self.assertContains(response, 'id="id_form-0-id"', 1) # Only one hidden field, in a separate place than the table.
self.assertContains(response, 'id="id_form-1-id"', 1)
self.assertContains(response, '<div class="hiddenfields">\n<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /><input type="hidden" name="form-1-id" value="%d" id="id_form-1-id" />\n</div>' % (story2.id, story1.id), html=True)
self.assertContains(response, '<td class="field-id">%d</td>' % story1.id, 1)
self.assertContains(response, '<td class="field-id">%d</td>' % story2.id, 1)
def test_pk_hidden_fields_with_list_display_links(self):
"""Similarly as test_pk_hidden_fields, but when the hidden pk fields are"""
"""referenced in list_display_links."""
"""Refs #12475."""
story1 = OtherStory.objects.create(title='The adventures of Guido', content='Once upon a time in Djangoland...')
story2 = OtherStory.objects.create(title='Crouching Tiger, Hidden Python', content='The Python was sneaking into...')
link1 = reverse('admin:admin
<FILEB>
<CHANGES>
values = np.arange(n, dtype=np.int64)
<CHANGEE>
<FILEE>
<FILEB>
if isinstance(key, np.ndarray) and key.dtype == np.object_:
key = np.asarray(key)
if not lib.is_bool_array(key):
if isnull(key).any():
raise ValueError('cannot index with vector containing '
'NA / NaN values')
return False
return True
elif isinstance(key, np.ndarray) and key.dtype == np.bool_:
return True
elif isinstance(key, list):
try:
return np.asarray(key).dtype == np.bool_
except TypeError: # pragma: no cover
return False
return False
def _default_index(n):
from pandas.core.index import Int64Index
<CHANGES>
values = np.arange(n)
<CHANGEE>
result = values.view(Int64Index)
result.name = None
return result
def ensure_float(arr):
if issubclass(arr.dtype.type, np.integer):
arr = arr.astype(float)
return arr
def _mut_exclusive(arg1, arg2):
if arg1 is not None and arg2 is not None:
raise Exception('mutually exclusive arguments')
elif arg1 is not None:
return arg1
<FILEE>
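The change above pins the default index values to int64 explicitly. np.arange(n) uses the platform's default integer (32-bit on Windows), so reinterpreting the result as a fixed-width int64 type can fail there. A minimal standalone sketch of the difference, not tied to the pandas internals shown here:

import numpy as np

a = np.arange(5)                  # platform-dependent integer dtype (int32 on Windows)
b = np.arange(5, dtype=np.int64)  # always 8-byte integers
print(a.dtype, b.dtype)
# b.view(np.int64) is always safe; a.view(np.int64) raises on platforms where
# a.itemsize is 4, because 5 * 4 bytes cannot be reinterpreted evenly as int64.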
<SCANS> n >= 0 else slice(-n, None)
lag_indexer = tuple(lag_indexer)
out_arr[res_indexer] = arr[res_indexer] - arr[lag_indexer]
return out_arr
def take_fast(arr, indexer, mask, needs_masking, axis=0, out=None,
fill_value=np.nan):
if arr.ndim == 2:
return take_2d(arr, indexer, out=out, mask=mask,
needs_masking=needs_masking,
axis=axis, fill_value=fill_value)
indexer = _ensure_platform_int(indexer)
result = ndtake(arr, indexer, axis=axis, out=out)
result = _maybe_mask(result, mask, needs_masking, axis=axis,
out_passed=out is not None, fill_value=fill_value)
return result
def _maybe_mask(result, mask, needs_masking, axis=0, out_passed=False,
fill_value=np.nan):
if needs_masking:
if out_passed and _need_upcast(result):
raise Exception('incompatible type for NAs')
else:
# a bit spaghettified
result = _maybe_upcast(result)
mask_out_axis(result, mask, axis, fill_value)
return result
def _maybe_upcast(values):
if issubclass(values.dtype.type, np.integer):
values = values.astype(float)
elif issubclass(values.dtype.type, np.bool_):
values = values.astype(object)
return values
def _need_upcast(values):
if issubclass(values.dtype.type, (np.integer, np.bool_)):
return True
return False
def _interp_wrapper(f, wrap_dtype, na_override=None):
def wrapper(arr, mask, limit=None):
view = arr.view(wrap_dtype)
f(view, mask, limit=limit)
return wrapper
_pad_1d_datetime = _interp_wrapper(_algos.pad_inplace_int64, np.int64)
_pad_2d_datetime = _interp_wrapper(_algos.pad_2d_inplace_int64, np.int64)
_backfill_1d_datetime = _interp_wrapper(_algos.backfill_inplace_int64,
np.int64)
_backfill_2d_datetime = _interp_wrapper(_algos.backfill_2d_inplace_int64,
np.int64)
def pad_1d(values, limit=None, mask=None):
if is_float_dtype(values):
_method = _algos.pad_inplace_float64
elif is_datetime64_dtype(values):
_method = _pad_1d_datetime
elif values.dtype == np.object_:
_method = _algos.pad_inplace_object
else: # pragma: no cover
raise ValueError('Invalid dtype for padding')
if mask is None:
mask = isnull(values)
mask = mask.view(np.uint8)
<FILEB>
<CHANGES>
from os.path import join, splitext
<CHANGEE>
<FILEE>
<FILEB>
#coding: utf-8
import os
import calendar
from datetime import datetime, timedelta
from uuid import uuid4
import hashlib
<CHANGES>
from os.path import join
<CHANGEE>
from thumbor.storages import BaseStorage
from thumbor.utils import logger
from boto.s3.connection import S3Connection
from boto.s3.bucket import Bucket
from dateutil.parser import parse as parse_ts
class Storage(BaseStorage):
__connection = None
def __get_s3_connection(self):
if self.__connection is None:
self.__connection = S3Connection(self.context.config.AWS_ACCESS_KEY,self.context.config.AWS_SECRET_KEY)
return self.__connection
def __get_s3_bucket(self):
<FILEE>
<SCANS> return Bucket(
connection=self.__get_s3_connection(),
name=self.context.config.RESULT_STORAGE_BUCKET
)
def put(self, path, bytes):
file_abspath = self.normalize_path(path)
logger.debug("[RESULT_STORAGE] putting s3 key at %s" % (file_abspath))
bucket = self.__get_s3_bucket()
file_key = bucket.get_key(file_abspath)
if not file_key:
file_key = bucket.new_key(file_abspath)
file_key.set_contents_from_string(bytes)
def put_crypto(self, path):
if not self.context.config.STORES_CRYPTO_KEY_FOR_EACH_IMAGE:
return
file_abspath = self.normalize_path(path)
if not self.context.server.security_key:
raise RuntimeError("STORES_CRYPTO_KEY_FOR_EACH_IMAGE can't be True if no SECURITY_KEY specified")
crypto_path = '%s.txt' % splitext(file_abspath)[0]
bucket = self.__get_s3_bucket()
file_key = bucket.get_key(crypto_path)
if not file_key:
file_key = bucket.new_key(crypto_path)
file_key.set_contents_from_string(self.context.server.security_key)
return file_abspath
def put_detector_data(self, path, data):
file_abspath = self.normalize_path(path)
path = '%s.detectors.txt' % splitext(file_abspath)[0]
bucket = self.__get_s3_bucket()
file_key = bucket.get_key(path)
if not file_key:
file_key = bucket.new_key(path)  # store detector data under the .detectors.txt key
file_key.set_contents_from_string(data)
return file_abspath
def get_crypto(self, path):
file_abspath = self.normalize_path(path)
crypto_file = "%s.txt" % (splitext(file_abspath)[0])
bucket = self.__get_s3_bucket()
file_key = bucket.get_key(crypto_file)
if not file_key:
return None
return file_key.read()
def get(self, path):
file_abspath = self.normalize_path(path)
logger.debug("[RESULT_STORAGE] getting from s3 key %s" % file_abspath)
bucket = self.__get_s3_bucket()
file_key = bucket.get_key(file_abspath)
if not file_key or self.is_expired(file_abspath):
logger.debug("[RESULT_STORAGE] s3 key not found at %s" % file_abspath)
return None
return file_key.read()
def get_detector_data(self, path):
file_abspath = self.normalize_path(path)
path = '%s.detectors.txt' % splitext(file_abspath)[0]
bucket = self.__get_s3_bucket()
file_key = bucket.get_key(path)
if not file_key or self.is_expired(file_abspath):
return None
return file_key.read()
def exists(self, path):
bucket = self.__get_s3_bucket()
file_abspath = self.normalize_path(path)
file_key = bucket.get_key(file_abspath)
if not file_key:
return False
return True
def normalize_path(self, path):
digest = hashlib.sha1(path.encode('utf
<FILEB>
<CHANGES>
with self.assertRaises(RuntimeError):
<CHANGEE>
<FILEE>
<FILEB>
pm.initialize()
pm.finalize()
def test_run(self):
mod = self.module()
fn = mod.get_function("sum")
pm = self.pm(mod)
self.pmb().populate(pm)
mod.close()
orig_asm = str(fn)
pm.initialize()
pm.run(fn)
pm.finalize()
opt_asm = str(fn)
# Quick check that optimizations were run
self.assertIn("%.4", orig_asm)
self.assertNotIn("%.4", opt_asm)
class TestDylib(BaseTest):
def test_bad_library(self):
<CHANGES>
with self.assertRaises(Exception):
<CHANGEE>
dylib.load_library_permanently("zzzasdkf;jasd;l")
@unittest.skipUnless(platform.system() in ["Linux", "Darwin"],
"Unsupport test for current OS")
def test_libm(self):
system = platform.system()
if system == "Linux":
libm = find_library("m")
elif system == "Darwin":
libm = find_library("libm")
dylib.load_library_permanently(libm)
if __name__ == "__main__":
unittest.main()
<FILEE>
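The change above narrows the expected exception from Exception to RuntimeError; a broad assertRaises can mask unrelated failures. A small illustrative sketch, where load_library is a hypothetical stand-in for the call under test:

import unittest

def load_library(name):
    # Hypothetical stand-in: an unrelated bug raises TypeError, not RuntimeError.
    raise TypeError("unexpected bug")

class Example(unittest.TestCase):
    def test_broad(self):
        # Passes even though the failure is an unrelated TypeError.
        with self.assertRaises(Exception):
            load_library("missing")

    def test_narrow(self):
        # Errors out loudly, pointing at the real problem.
        with self.assertRaises(RuntimeError):
            load_library("missing")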
<SCANS>RuntimeError) as cm:
dest.link_in(src)
self.assertIn("symbol multiply defined", str(cm.exception))
def test_as_bitcode(self):
mod = self.module()
bc = mod.as_bitcode()
# Refer to http://llvm.org/docs/doxygen/html/ReaderWriter_8h_source.html#l00064
# and http://llvm.org/docs/doxygen/html/ReaderWriter_8h_source.html#l00092
bitcode_wrapper_magic = b'\xde\xc0\x17\x0b'
bitcode_magic = b'BC'
self.assertTrue(bc.startswith(bitcode_magic) or
bc.startswith(bitcode_wrapper_magic))
def test_parse_bitcode_error(self):
with self.assertRaises(RuntimeError) as cm:
llvm.parse_bitcode(b"")
self.assertIn("LLVM bitcode parsing error", str(cm.exception))
self.assertIn("Invalid bitcode signature", str(cm.exception))
def test_bitcode_roundtrip(self):
bc = self.module().as_bitcode()
mod = llvm.parse_bitcode(bc)
self.assertEqual(mod.as_bitcode(), bc)
mod.get_function("sum")
mod.get_global_variable("glob")
def test_cloning(self):
m = self.module()
cloned = m.clone()
self.assertIsNot(cloned, m)
self.assertEqual(cloned.as_bitcode(), m.as_bitcode())
class JITTestMixin(object):
"""Mixin for ExecutionEngine tests."""
def test_run_code(self):
mod = self.module()
with self.jit(mod) as ee:
ee.finalize_object()
cfptr = ee.get_pointer_to_global(mod.get_function('sum'))
cfunc = CFUNCTYPE(c_int, c_int, c_int)(cfptr)
res = cfunc(2, -5)
self.assertEqual(-3, res)
def test_close(self):
ee = self.jit(self.module())
ee.close()
ee.close()
with self.assertRaises(ctypes.ArgumentError):
ee.finalize_object()
def test_with(self):
ee = self.jit(self.module())
with ee:
pass
with self.assertRaises(RuntimeError):
with ee:
pass
with self.assertRaises(ctypes.ArgumentError):
ee.finalize_object()
def test_module_lifetime(self):
mod = self.module()
ee = self.jit(mod)
ee.close()
mod.close()
def test_module_lifetime2(self):
mod = self.module()
ee = self.jit(mod)
mod.close()
ee.close()
def test_add_module(self):
ee = self.jit(self.module())
mod = self.module(asm_mul)
ee.add_module(mod)
with self.assertRaises(KeyError):
ee.add_module(mod)
self.assertFalse(mod.
<FILEB>
<CHANGES>
index = self.data.index(finfo)
<CHANGEE>
<FILEE>
<FILEB>
editor.setFocus()
def new(self, filename, encoding, text):
"""Create new filename with *encoding* and *text*"""
finfo = self.create_new_editor(filename, encoding, text,
set_current=False, new=True)
finfo.editor.set_cursor_position('eof')
finfo.editor.insert_text(os.linesep)
return finfo
def load(self, filename, set_current=True):
"""Load filename, create an editor instance and return it"""
"""*Warning* This is loading file, creating editor but not executing"""
"""the source code analysis -- the analysis must be done by the editor"""
"""plugin (in case multiple editorstack instances are handled)"""
filename = osp.abspath(unicode(filename))
self.emit(SIGNAL('starting_long_process(QString)'),
_("Loading %s...") % filename)
text, enc = encoding.read(filename)
finfo = self.create_new_editor(filename, enc, text, set_current)
<CHANGES>
index = self.get_stack_index()
<CHANGEE>
self._refresh_outlineexplorer(index, update=True)
self.emit(SIGNAL('ending_long_process(QString)'), "")
if self.isVisible() and self.checkeolchars_enabled \
and sourcecode.has_mixed_eol_chars(text):
name = osp.basename(filename)
QMessageBox.warning(self, self.title,
_("<b>%s</b> contains mixed end-of-line "
"characters.<br>Spyder will fix this "
"automatically.") % name,
QMessageBox.Ok)
self.set_os_eol_chars(index)
self.is_analysis_done = False
<FILEE>
<SCANS> finfo.editor.set_calltips(state)
def set_go_to_definition_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'go_to_definition')
self.go_to_definition_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_go_to_definition_enabled(state)
def set_close_parentheses_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'close_parentheses')
self.close_parentheses_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_close_parentheses_enabled(state)
def set_close_quotes_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'close_quotes')
self.close_quotes_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_close_quotes_enabled(state)
def set_add_colons_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'add_colons')
self.add_colons_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_add_colons_enabled(state)
def set_auto_unindent_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'auto_unindent')
self.auto_unindent_enabled = state
if self.data:
for finfo in self.data:
finfo.editor.set_auto_unindent_enabled(state)
def set_indent_chars(self, indent_chars):
# CONF.get(self.CONF_SECTION, 'indent_chars')
indent_chars = indent_chars[1:-1] # removing the leading/ending '*'
self.indent_chars = indent_chars
if self.data:
for finfo in self.data:
finfo.editor.set_indent_chars(indent_chars)
def set_tab_stop_width(self, tab_stop_width):
# CONF.get(self.CONF_SECTION, 'tab_stop_width')
self.tab_stop_width = tab_stop_width
if self.data:
for finfo in self.data:
finfo.editor.setTabStopWidth(tab_stop_width)
def set_inspector_enabled(self, state):
self.inspector_enabled = state
def set_outlineexplorer_enabled(self, state):
# CONF.get(self.CONF_SECTION, 'outline_
<FILEB>
<CHANGES>
user_gateways = storage.list_user_gateways_by_volume( volume.volume_id )
<CHANGEE>
<FILEE>
<FILEB>
ug_read_start = storagetypes.get_time()
UG = get_UG( username, password )
ug_read_time = storagetypes.get_time() - ug_read_start
if UG == None:
# no UG
self.response.status = 403
self.response.write("Authorization Failed\n")
return
# authenticate the requesting UG
valid_UG = UG.authenticate( password )
if not valid_UG:
# invalid credentials
self.response.status = 403
self.response.write("Authorization Failed\n")
return
# if we're still here, we're good to go
# request for volume metadata
volume_metadata = ms_pb2.ms_volume_metadata();
<CHANGES>
user_gateways = UserGateway.ListAll( volume.volume_id )
<CHANGEE>
volume.protobuf( volume_metadata, user_gateways )
volume_metadata.requester_id = UG.owner_id
data = volume_metadata.SerializeToString()
self.response.status = 200
self.response.headers['X-Volume-Time'] = str(volume_read_time)
self.response.headers['X-UG-Time'] = str(ug_read_time)
self.response.headers['X-Total-Time'] = str( storagetypes.get_time() - volume_request_start )
self.response.write( data )
return
class MSFileRequestHandler(webapp2.RequestHandler):
"""Volume file request handler."""
"""It will read and list metadata entries via GET."""
<FILEE>
<SCANS>#!/usr/bin/env python
"""Copyright 2013 The Trustees of Princeton University"""
"""All Rights Reserved"""
import webapp2
import urlparse
import MS
from MS.methods.resolve import Resolve
import protobufs.ms_pb2 as ms_pb2
from storage import storage
import storage.storagetypes as storagetypes
from entry import MSEntry
from volume import Volume
from gateway import UserGateway
import errno
import logging
import random
import os
import base64
import urllib
import time
import datetime
HTTP_MS_LASTMOD = "X-MS-LastMod"
def get_client_lastmod( headers ):
lastmod = headers.get( HTTP_MS_LASTMOD )
if lastmod == None:
return None
try:
lastmod_f = float( lastmod )
return lastmod_f
except:
return None
def read_basic_auth( headers ):
basic_auth = headers.get("Authorization")
if basic_auth == None:
logging.info("no authorization header")
return (None, None)
username, password = '', ''
try:
user_info = base64.decodestring( basic_auth[6:] )
username, password = user_info.split(":")
except:
logging.info("incomprehensible Authorization header: '%s'" % basic_auth )
return (None, None)
return username, password
def get_UG( username, password ):
if username == None or password == None:
# invalid header
return None
UG = UserGateway.Read( username )
return UG
class MSVolumeRequestHandler(webapp2.RequestHandler):
"""Volume metadata request handler."""
def get( self, volume_name ):
volume_request_start = storagetypes.get_time()
volume_read_start = storagetypes.get_time()
volume = Volume.Read( volume_name )
volume_read_time = storagetypes.get_time() - volume_read_start
if volume == None:
# no volume
self.response.status = 404
self.response.write("No such volume\n")
return
# authenticate the request to the Volume
authenticated = volume.authenticate_gateway( self.request.headers )
if not authenticated:
self.response.status = 403
self.response.write("Authorization Failed\n")
return
# get the UG's credentials
username, password = read_basic_auth( self.request.headers )
if username == None or password == None:
self.response.status = 401
self.response.write("Authentication Required\n")
return
# look up the requesting UG
"""It will create, delete, and update metadata entries via POST."""
def get( self, volume_name, path ):
file_request_
<FILEB>
<CHANGES>
table=self.output()._tablename)).fetchall()
<CHANGEE>
<FILEE>
<FILEB>
elif len(geometry_columns) > 1:
raise Exception('Having more than one geometry column in one table '
'could lead to problematic behavior ')
colname, colid = geometry_columns[0]
# Use SQL directly instead of SQLAlchemy because we need the_geom set
# on obs_table in this session
current_session().execute("UPDATE observatory.obs_table "
"SET the_geom = ST_GeomFromText('{the_geom}', 4326) "
"WHERE id = '{id}'".format(
the_geom=self.the_geom(output, colname),
id=output._id
))
generate_tile_summary(current_session(),
output._id, colid, output.table, colname)
def check_null_columns(self):
session = current_session()
result = session.execute("SELECT attname FROM pg_stats WHERE schemaname = 'observatory' "
"AND tablename = '{table}' AND null_frac = 1".format(
<CHANGES>
table=self.output().table)).fetchall()
<CHANGEE>
if result:
raise ValueError('The following columns of the table "{table}" contain only NULL values: {columns}'.format(
table=self.output().table, columns=', '.join([x[0] for x in result])))
def output(self):
#if self.deps() and not all([d.complete() for d in self.deps()]):
# raise Exception('Must run prerequisites first')
if not hasattr(self, '_columns'):
self._columns = self.columns()
tt = TableTarget(classpath(self),
underscore_slugify(self.task_id),
OBSTable(description=self.description(),
version=self.version(),
<FILEE>
<SCANS>, name, obs_table, columns, task):
'''columns: should be an ordereddict if you want to specify columns' order'''
'''in the table'''
self._id = '.'.join([schema, name])
obs_table.id = self._id
obs_table.tablename = 'obs_' + sha1(underscore_slugify(self._id)).hexdigest()
self.table = 'observatory.' + obs_table.tablename
self._tablename = obs_table.tablename
self._schema = schema
self._name = name
self._obs_table = obs_table
self._obs_dict = obs_table.__dict__.copy()
self._columns = columns
self._task = task
if obs_table.tablename in metadata.tables:
self._table = metadata.tables[obs_table.tablename]
else:
self._table = None
def sync(self):
'''Whether this data should be synced to carto. Defaults to True.'''
return True
def exists(self):
'''We always want to run this at least once, because we can always'''
'''regenerate tabular data from scratch.'''
session = current_session()
existing = self.get(session)
new_version = float(self._obs_table.version or 0.0)
if existing:
existing_version = float(existing.version or 0.0)
if existing in session:
session.expunge(existing)
else:
existing_version = 0.0
if existing and existing_version == new_version:
resp = session.execute(
'SELECT COUNT(*) FROM information_schema.tables '
"WHERE table_schema = '{schema}' "
" AND table_name = '{tablename}' ".format(
schema='observatory',
tablename=existing.tablename))
if int(resp.fetchone()[0]) == 0:
return False
resp = session.execute(
'SELECT row_number() over () '
'FROM "{schema}".{tablename} LIMIT 1 '.format(
schema='observatory',
tablename=existing.tablename))
return resp.fetchone() is not None
elif existing and existing_version > new_version:
raise Exception('Metadata version mismatch: cannot run task {task} '
'(id "{id}") '
'with ETL version ({etl}) older than what is in '
'DB ({db})'.format(task=self._task.task_id,
id=self._id,
etl=new_version,
db=existing_version))
return False
def get(self, session):
'''Return a copy of the underlying OBSTable in the specified session.'''
with session.no_autoflush:
return session.query(OBSTable).get(self._id
<FILEB>
<CHANGES>
index = self.data.index(finfo)
<CHANGEE>
<FILEE>
<FILEB>
editor.setFocus()
def new(self, filename, encoding, text):
"""Create new filename with *encoding* and *text*"""
finfo = self.create_new_editor(filename, encoding, text,
set_current=False, new=True)
finfo.editor.set_cursor_position('eof')
finfo.editor.insert_text(os.linesep)
return finfo
def load(self, filename, set_current=True):
"""Load filename, create an editor instance and return it"""
"""*Warning* This is loading file, creating editor but not executing"""
"""the source code analysis -- the analysis must be done by the editor"""
"""plugin (in case multiple editorstack instances are handled)"""
filename = osp.abspath(unicode(filename))
self.emit(SIGNAL('starting_long_process(QString)'),
_("Loading %s...") % filename)
text, enc = encoding.read(filename)
finfo = self.create_new_editor(filename, enc, text, set_current)
<CHANGES>
index = self.get_stack_index()
<CHANGEE>
self._refresh_outlineexplorer(index, update=True)
self.emit(SIGNAL('ending_long_process(QString)'), "")
if self.isVisible() and self.checkeolchars_enabled \
and sourcecode.has_mixed_eol_chars(text):
name = osp.basename(filename)
QMessageBox.warning(self, self.title,
_("<b>%s</b> contains mixed end-of-line "
"characters.<br>Spyder will fix this "
"automatically.") % name,
QMessageBox.Ok)
self.set_os_eol_chars(index)
self.is_analysis_done = False
<FILEE>
<SCANS> self.sender()
try:
close_splitter = self.count() == 1 and self.editorstack is None
except RuntimeError:
# editorsplitter has been destroyed (happens when closing a
# EditorMainWindow instance)
return
if close_splitter:
# editorsplitter just closed was the last widget in this QSplitter
self.close()
return
elif self.count() == 2 and self.editorstack:
# back to the initial state: a single editorstack instance,
# as a single widget in this QSplitter: orientation may be changed
self.editorstack.reset_orientation()
self.__give_focus_to_remaining_editor()
def split(self, orientation=Qt.Vertical):
self.setOrientation(orientation)
self.editorstack.set_orientation(orientation)
editorsplitter = EditorSplitter(self.parent(), self.plugin,
self.menu_actions,
register_editorstack_cb=self.register_editorstack_cb,
unregister_editorstack_cb=self.unregister_editorstack_cb)
self.addWidget(editorsplitter)
self.connect(editorsplitter, SIGNAL("destroyed()"),
lambda: self.editorsplitter_closed())
current_editor = editorsplitter.editorstack.get_current_editor()
if current_editor is not None:
current_editor.setFocus()
def iter_editorstacks(self):
editorstacks = [(self.widget(0), self.orientation())]
if self.count() > 1:
editorsplitter = self.widget(1)
editorstacks += editorsplitter.iter_editorstacks()
return editorstacks
def get_layout_settings(self):
"""Return layout state"""
splitsettings = []
for editorstack, orientation in self.iter_editorstacks():
clines = [finfo.editor.get_cursor_line_number()
for finfo in editorstack.data]
cfname = editorstack.get_current_filename()
splitsettings.append((orientation == Qt.Vertical, cfname, clines))
return dict(hexstate=str(self.saveState().toHex()),
sizes=self.sizes(), splitsettings=splitsettings)
def set_layout_settings(self, settings):
"""Restore layout state"""
splitsettings = settings.get('splitsettings')
if splitsettings is None:
return
splitter = self
editor = None
for index, (is_vertical, cfname, clines) in enumerate(splitsettings):
if index > 0:
splitter.split(Qt.Vertical if is_vertical else Qt.Horizontal)
splitter = splitter.widget(1)
editorstack = splitter.widget(0)
for index, finfo in enumerate(editorstack.data):
editor
<FILEB>
<CHANGES>
os.chdir(sys.prefix)
<CHANGEE>
<FILEE>
<FILEB>
from .._vendor.boltons.setutils import IndexedSet
from ..common.compat import NoneType, iteritems, itervalues, odict, on_win, string_types
from ..common.configuration import (Configuration, LoadError, MapParameter, PrimitiveParameter,
SequenceParameter, ValidationError)
from ..common.disk import conda_bld_ensure_dir
from ..common.path import expand
from ..common.platform import linux_get_libc_version
from ..common.url import has_scheme, path_to_url, split_scheme_auth_token
try:
from cytoolz.itertoolz import concat, concatv, unique
except ImportError: # pragma: no cover
from .._vendor.toolz.itertoolz import concat, concatv, unique
try:
os.getcwd()
except (IOError, OSError) as e:
if e.errno == ENOENT:
# FileNotFoundError can occur when cwd has been deleted out from underneath the process.
# To resolve #6584, let's go with setting cwd to $HOME, and see how far we get.
<CHANGES>
os.chdir(expand('~'))
<CHANGEE>
else:
raise
log = getLogger(__name__)
_platform_map = {
'linux2': 'linux',
'linux': 'linux',
'darwin': 'osx',
'win32': 'win',
'zos': 'zos',
}
non_x86_linux_machines = {
'armv6l',
<FILEE>
<SCANS>
def hg_cache(self):
path = join(self.croot, 'hg_cache')
conda_bld_ensure_dir(path)
return path
@property
def svn_cache(self):
path = join(self.croot, 'svn_cache')
conda_bld_ensure_dir(path)
return path
@property
def arch_name(self):
m = machine()
if self.platform == 'linux' and m in non_x86_linux_machines:
return m
else:
return _arch_names[self.bits]
@property
def conda_private(self):
return conda_in_private_env()
@property
def platform(self):
return _platform_map.get(sys.platform, 'unknown')
@property
def subdir(self):
if self._subdir:
return self._subdir
m = machine()
if m in non_x86_linux_machines:
return 'linux-%s' % m
elif self.platform == 'zos':
return 'zos-z'
else:
return '%s-%d' % (self.platform, self.bits)
@property
def subdirs(self):
return self._subdirs if self._subdirs else (self.subdir, 'noarch')
@memoizedproperty
def known_subdirs(self):
return frozenset(concatv(PLATFORM_DIRECTORIES, self.subdirs))
@property
def bits(self):
if self.force_32bit:
return 32
else:
return 8 * tuple.__itemsize__
@property
def root_dir(self):
# root_dir is an alias for root_prefix, we prefer the name "root_prefix"
# because it is more consistent with other names
return self.root_prefix
@property
def root_writable(self):
# rather than using conda.gateways.disk.test.prefix_is_writable
# let's shortcut and assume the root prefix exists
path = join(self.root_prefix, PREFIX_MAGIC_FILE)
if isfile(path):
try:
fh = open(path, 'a+')
except (IOError, OSError) as e:
log.debug(e)
return False
else:
fh.close()
return True
return False
@property
def envs_dirs(self):
if self.root_writable:
fixed_dirs = (
join(self.root_prefix, 'envs'),
join(self._user_data_dir, 'envs'),
join('~', '.conda', 'envs'),
)
else:
fixed_dirs = (
join(self._user_data_dir, 'envs'),
join(self.root_prefix, 'envs'),
join('~', '.conda', 'envs'),
)
return tuple(IndexedSet(expand(p) for p in concatv(self._envs_dirs, fixed_dirs)))
@property
def pk
<FILEB>
<CHANGES>
self.assertIn('user.bar', os.listxattr(dst))
<CHANGEE>
<FILEE>
<FILEB>
shutil._copyxattr(src, dst)
self.assertEqual(os.listxattr(src), os.listxattr(dst))
self.assertEqual(
os.getxattr(src, 'user.foo'),
os.getxattr(dst, 'user.foo'))
# check errors don't affect other attrs
os.remove(dst)
write_file(dst, 'bar')
os_error = OSError(errno.EPERM, 'EPERM')
def _raise_on_user_foo(fname, attr, val):
if attr == 'user.foo':
raise os_error
else:
orig_setxattr(fname, attr, val)
try:
orig_setxattr = os.setxattr
os.setxattr = _raise_on_user_foo
shutil._copyxattr(src, dst)
<CHANGES>
self.assertEqual(['user.bar'], os.listxattr(dst))
<CHANGEE>
finally:
os.setxattr = orig_setxattr
@support.skip_unless_symlink
@support.skip_unless_xattr
@unittest.skipUnless(hasattr(os, 'geteuid') and os.geteuid() == 0,
'root privileges required')
def test_copyxattr_symlinks(self):
# On Linux, it's only possible to access non-user xattr for symlinks;
# which in turn require root privileges. This test should be expanded
# as soon as other platforms gain support for extended attributes.
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'foo')
<FILEE>
<SCANS>_dir, 'test.txt'))
os.mkdir(os.path.join(src_dir, 'test_dir'))
write_file((src_dir, 'test_dir', 'test.txt'), '456')
self.assertRaises(Error, shutil.copytree, src_dir, dst_dir)
# a dangling symlink is ignored with the proper flag
dst_dir = os.path.join(self.mkdtemp(), 'destination2')
shutil.copytree(src_dir, dst_dir, ignore_dangling_symlinks=True)
self.assertNotIn('test.txt', os.listdir(dst_dir))
# a dangling symlink is copied if symlinks=True
dst_dir = os.path.join(self.mkdtemp(), 'destination3')
shutil.copytree(src_dir, dst_dir, symlinks=True)
self.assertIn('test.txt', os.listdir(dst_dir))
def _copy_file(self, method):
fname = 'test.txt'
tmpdir = self.mkdtemp()
write_file((tmpdir, fname), 'xxx')
file1 = os.path.join(tmpdir, fname)
tmpdir2 = self.mkdtemp()
method(file1, tmpdir2)
file2 = os.path.join(tmpdir2, fname)
return (file1, file2)
@unittest.skipUnless(hasattr(os, 'chmod'), 'requires os.chmod')
def test_copy(self):
# Ensure that the copied file exists and has the same mode bits.
file1, file2 = self._copy_file(shutil.copy)
self.assertTrue(os.path.exists(file2))
self.assertEqual(os.stat(file1).st_mode, os.stat(file2).st_mode)
@unittest.skipUnless(hasattr(os, 'chmod'), 'requires os.chmod')
@unittest.skipUnless(hasattr(os, 'utime'), 'requires os.utime')
def test_copy2(self):
# Ensure that the copied file exists and has the same mode and
# modification time bits.
file1, file2 = self._copy_file(shutil.copy2)
self.assertTrue(os.path.exists(file2))
file1_stat = os.stat(file1)
file2_stat = os.stat(file2)
self.assertEqual(file1_stat.st_mode, file2_stat.st_mode)
for attr in 'st_atime', 'st_mtime':
# The modification times may be truncated in the new file.
self.assertLessEqual(getattr(file1_stat, attr),
getattr(file2_stat, attr) + 1)
if hasattr(os, 'chflags') and
<FILEB>
<CHANGES>
_log.experimental("Opening new pull request for: %s", ', '.join(paths))
<CHANGEE>
<FILEE>
<FILEB>
remote_name = 'github_%s_%s' % (github_user, salt)
dry_run = build_option('dry_run') or build_option('extended_dry_run')
if not dry_run:
my_remote = git_repo.create_remote(remote_name, github_url)
res = my_remote.push(pr_branch)
if res:
if res[0].ERROR & res[0].flags:
raise EasyBuildError("Pushing branch '%s' to remote %s (%s) failed: %s",
pr_branch, my_remote, github_url, res[0].summary)
else:
_log.debug("Pushed branch %s to remote %s (%s): %s", pr_branch, my_remote, github_url, res[0].summary)
else:
raise EasyBuildError("Pushing branch '%s' to remote %s (%s) failed: empty result",
pr_branch, my_remote, github_url)
return file_info, git_repo, pr_branch, diff_stat
@only_if_module_is_available('git', pkgname='GitPython')
def new_pr(paths, title=None, descr=None, commit_msg=None):
"""Open new pull request using specified files."""
<CHANGES>
_log.experimental("Opening new pull request for with %s", paths)
<CHANGEE>
pr_branch_name = build_option('pr_branch_name')
pr_target_account = build_option('pr_target_account')
pr_target_repo = build_option('pr_target_repo')
# collect GitHub info we'll need
# * GitHub username to push branch to repo
# * GitHub token to open PR
github_user = build_option('github_user')
if github_user is None:
raise EasyBuildError("GitHub user must be specified to use --new-pr")
github_token = fetch_github_token(github_user)
if github_token is None:
raise EasyBuildError("GitHub token for user '%s' must be available to use --new-pr", github_user)
<FILEE>
<SCANS>python.org/pypi/keyring is not available."
else:
self.token = keyring.get_password(KEYRING_GITHUB_TOKEN, user)
if self.token is None:
tup = (KEYRING_GITHUB_TOKEN, user)
python_cmd = "import getpass, keyring; keyring.set_password(\"%s\", \"%s\", getpass.getpass())" % tup
msg = '\n'.join([
"Failed to obtain GitHub token for %s" % user,
"Use the following procedure to install a GitHub token in your keyring:",
"$ python -c '%s'" % python_cmd,
])
if self.token is None:
# failure, for some reason
_log.warning(msg)
else:
# success
_log.info("Successfully obtained GitHub token for user %s from keyring." % user)
def fetch_github_token(user):
"""Fetch GitHub token for specified user from keyring."""
return GithubToken(user).token
<FILEB>
<CHANGES>
self.assertEqual(sorted(six.iterkeys(d)), sorted(six.iterkeys(mvd)))
<CHANGEE>
<FILEE>
<FILEB>
d2 = copy_func(d1)
d2.update({"developers": "Groucho"})
self.assertEqual(d2["developers"], "Groucho")
self.assertEqual(d1["developers"], "Fred")
d1 = MultiValueDict({
"key": [[]]
})
self.assertEqual(d1["key"], [])
d2 = copy_func(d1)
d2["key"].append("Penguin")
self.assertEqual(d1["key"], ["Penguin"])
self.assertEqual(d2["key"], ["Penguin"])
def test_dict_translation(self):
mvd = MultiValueDict({
'devs': ['Bob', 'Joe'],
'pm': ['Rory'],
})
d = mvd.dict()
<CHANGES>
self.assertEqual(list(six.iterkeys(d)), list(six.iterkeys(mvd)))
<CHANGEE>
for key in six.iterkeys(mvd):
self.assertEqual(d[key], mvd[key])
self.assertEqual({}, MultiValueDict().dict())
class ImmutableListTests(SimpleTestCase):
def test_sort(self):
d = ImmutableList(range(10))
# AttributeError: ImmutableList object is immutable.
self.assertRaisesMessage(AttributeError,
'ImmutableList object is immutable.', d.sort)
self.assertEqual(repr(d), '(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)')
def test_custom_warning(self):
d = ImmutableList(range(10), warning="Object is immutable!")
<FILEE>
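The change above compares sorted key lists instead of relying on iteration order; before Python 3.7 dict ordering was an implementation detail, so comparing raw key lists could fail spuriously. A small order-independent check, illustrative only:

d1 = {'devs': 1, 'pm': 2}
d2 = {'pm': 2, 'devs': 1}
print(list(d1) == list(d2))      # depends on insertion/iteration order (False here)
print(sorted(d1) == sorted(d2))  # order-independent (True)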
<SCANS>list('key2'), ['value2', 'value3'])
self.assertEqual(mm.getlist('key4'), ['value5', 'value6'])
self.assertEqual(mm.getlist('undefined'), [])
self.assertEqual(sorted(six.iterkeys(mm)), ['key1', 'key2', 'key4'])
self.assertEqual(len(list(six.itervalues(mm))), 3)
self.assertTrue('value1' in six.itervalues(mm))
self.assertEqual(sorted(six.iteritems(mm), key=lambda k: k[0]),
[('key1', 'value1'), ('key2', 'value3'),
('key4', 'value6')])
self.assertEqual([(k,mm.getlist(k)) for k in sorted(mm)],
[('key1', ['value1']),
('key2', ['value2', 'value3']),
('key4', ['value5', 'value6'])])
def test_bool_casting(self):
empty = MergeDict({}, {}, {})
not_empty = MergeDict({}, {}, {"key": "value"})
self.assertFalse(empty)
self.assertTrue(not_empty)
def test_key_error(self):
"""Test that the message of KeyError contains the missing key name."""
d1 = MergeDict({'key1': 42})
with six.assertRaisesRegex(self, KeyError, 'key2'):
d1['key2']
class MultiValueDictTests(SimpleTestCase):
def test_multivaluedict(self):
d = MultiValueDict({'name': ['Adrian', 'Simon'],
'position': ['Developer']})
self.assertEqual(d['name'], 'Simon')
self.assertEqual(d.get('name'), 'Simon')
self.assertEqual(d.getlist('name'), ['Adrian', 'Simon'])
self.assertEqual(sorted(list(six.iteritems(d))),
[('name', 'Simon'), ('position', 'Developer')])
self.assertEqual(sorted(list(six.iterlists(d))),
[('name', ['Adrian', 'Simon']),
('position', ['Developer'])])
six.assertRaisesRegex(self, MultiValueDictKeyError, 'lastname',
d.__getitem__, 'lastname')
self.assertEqual(d.get('lastname'), None)
self.assertEqual(d.get('lastname', 'nonexistent'), 'nonexistent')
self.assertEqual(d.getlist('lastname'), [])
self.assertEqual(d.getlist('doesnotexist', ['Adrian', 'Simon']),
['Adrian', 'Simon'])
d.setlist('lastname', ['Holovaty', 'Willison'])
self.assertEqual(d.getlist('lastname'), ['Holovaty', 'Willison'])
self.assertEqual(sorted(list(six.itervalues(d))),
['Developer', 'Simon', 'Willison'])
<FILEB>
<CHANGES>
libraries = self.detect_math_libs()
<CHANGEE>
<FILEE>
<FILEB>
if ffi_inc and ffi_lib:
ext.include_dirs.extend(ffi_inc)
ext.libraries.append(ffi_lib)
self.use_system_libffi = True
def _decimal_ext(self):
extra_compile_args = []
undef_macros = []
if '--with-system-libmpdec' in sysconfig.get_config_var("CONFIG_ARGS"):
include_dirs = []
libraries = [':libmpdec.so.2']
sources = ['_decimal/_decimal.c']
depends = ['_decimal/docstrings.h']
else:
srcdir = sysconfig.get_config_var('srcdir')
include_dirs = [os.path.abspath(os.path.join(srcdir,
'Modules',
'_decimal',
'libmpdec'))]
<CHANGES>
libraries = []
<CHANGEE>
sources = [
'_decimal/_decimal.c',
'_decimal/libmpdec/basearith.c',
'_decimal/libmpdec/constants.c',
'_decimal/libmpdec/context.c',
'_decimal/libmpdec/convolute.c',
'_decimal/libmpdec/crt.c',
'_decimal/libmpdec/difradix2.c',
'_decimal/libmpdec/fnt.c',
'_decimal/libmpdec/fourstep.c',
'_decimal/libmpdec/io.c',
'_decimal/libmpdec/memory.c',
<FILEE>
<SCANS>.library_dirs + [
'/lib64', '/usr/lib64',
'/lib', '/usr/lib',
]
inc_dirs = self.compiler.include_dirs + ['/usr/include']
else:
lib_dirs = self.compiler.library_dirs[:]
inc_dirs = self.compiler.include_dirs[:]
exts = []
missing = []
config_h = sysconfig.get_config_h_filename()
with open(config_h) as file:
config_h_vars = sysconfig.parse_config_h(file)
srcdir = sysconfig.get_config_var('srcdir')
# OSF/1 and Unixware have some stuff in /usr/ccs/lib (like -ldb)
if host_platform in ['osf1', 'unixware7', 'openunix8']:
lib_dirs += ['/usr/ccs/lib']
# HP-UX11iv3 keeps files in lib/hpux folders.
if host_platform == 'hp-ux11':
lib_dirs += ['/usr/lib/hpux64', '/usr/lib/hpux32']
if host_platform == 'darwin':
# This should work on any unixy platform ;-)
# If the user has bothered specifying additional -I and -L flags
# in OPT and LDFLAGS we might as well use them here.
#
# NOTE: using shlex.split would technically be more correct, but
# also gives a bootstrap problem. Let's hope nobody uses
# directories with whitespace in the name to store libraries.
cflags, ldflags = sysconfig.get_config_vars(
'CFLAGS', 'LDFLAGS')
for item in cflags.split():
if item.startswith('-I'):
inc_dirs.append(item[2:])
for item in ldflags.split():
if item.startswith('-L'):
lib_dirs.append(item[2:])
math_libs = self.detect_math_libs()
# XXX Omitted modules: gl, pure, dl, SGI-specific modules
#
# The following modules are all pretty straightforward, and compile
# on pretty much any POSIXish platform.
#
# array objects
exts.append( Extension('array', ['arraymodule.c']) )
shared_math = 'Modules/_math.o'
# complex math library functions
exts.append( Extension('cmath', ['cmathmodule.c'],
extra_objects=[shared_math],
depends=['_math.h', shared_math],
libraries=math_libs) )
# math library functions, e.g. sin()
exts.append( Extension('math', ['mathmodule.c'],
extra_objects=[shared_math],
depends=['_math.h', shared_math],
libraries=math_libs) )
# time libraries: librt may be needed for clock_gettime()
time_libs = []
lib = sysconfig.get_config_var('TIMEMODULE_LIB')
if lib:
time_libs
<FILEB>
<CHANGES>
if currency.company_id.id!= company_id:
<CHANGEE>
<FILEE>
<FILEB>
}
}
if type in ('in_invoice', 'in_refund'):
result['value']['partner_bank'] = bank_id
if payment_term != partner_payment_term:
if partner_payment_term:
to_update = self.onchange_payment_term_date_invoice(
cr,uid,ids,partner_payment_term,date_invoice)
result['value'].update(to_update['value'])
else:
result['value']['date_due'] = False
if partner_bank_id != bank_id:
to_update = self.onchange_partner_bank(cr, uid, ids, bank_id)
result['value'].update(to_update['value'])
return result
def onchange_currency_id(self, cr, uid, ids, curr_id, company_id):
if curr_id:
currency = self.pool.get('res.currency').browse(cr, uid, curr_id)
<CHANGES>
if currency.company_id != company_id:
<CHANGEE>
raise osv.except_osv(_('Configuration Error!'),
_('Cannot select a currency that is not related to the current company.\nPlease select accordingly!'))
return {}
def onchange_payment_term_date_invoice(self, cr, uid, ids, payment_term_id, date_invoice):
if not payment_term_id:
return {}
res={}
pt_obj= self.pool.get('account.payment.term')
if not date_invoice :
date_invoice = time.strftime('%Y-%m-%d')
pterm_list = pt_obj.compute(cr, uid, payment_term_id, value=1, date_ref=date_invoice)
if pterm_list:
<FILEE>
<SCANS>:
line2 = {}
for x, y, l in line:
tmp = str(l['account_id'])
tmp += '-'+str(l.get('tax_code_id',"False"))
tmp += '-'+str(l.get('product_id',"False"))
tmp += '-'+str(l.get('analytic_account_id',"False"))
tmp += '-'+str(l.get('date_maturity',"False"))
if tmp in line2:
am = line2[tmp]['debit'] - line2[tmp]['credit'] + (l['debit'] - l['credit'])
line2[tmp]['debit'] = (am > 0) and am or 0.0
line2[tmp]['credit'] = (am < 0) and -am or 0.0
line2[tmp]['tax_amount'] += l['tax_amount']
line2[tmp]['analytic_lines'] += l['analytic_lines']
else:
line2[tmp] = l
line = []
for key, val in line2.items():
line.append((0,0,val))
journal_id = inv.journal_id.id #self._get_journal(cr, uid, {'type': inv['type']})
journal = self.pool.get('account.journal').browse(cr, uid, journal_id)
if journal.centralisation:
raise osv.except_osv(_('UserError'),
_('Cannot create invoice move on centralised journal'))
move = {'ref': inv.number, 'line_id': line, 'journal_id': journal_id, 'date': date}
period_id=inv.period_id and inv.period_id.id or False
if not period_id:
period_ids= self.pool.get('account.period').search(cr,uid,[('date_start','<=',inv.date_invoice or time.strftime('%Y-%m-%d')),('date_stop','>=',inv.date_invoice or time.strftime('%Y-%m-%d'))])
if len(period_ids):
period_id=period_ids[0]
if period_id:
move['period_id'] = period_id
for i in line:
i[2]['period_id'] = period_id
move_id = self.pool.get('account.move').create(cr, uid, move, context=context)
new_move_name = self.pool.get('account.move').browse(cr, uid, move_id).name
# make the invoice point to that move
self.write(cr, uid, [inv.id], {'move_id': move_id,'period_id':period_id, '
<FILEB>
<CHANGES>
name = kwargs.pop('name',None)
<CHANGEE>
<FILEE>
<FILEB>
def c_headers(self):
"""Override `CLinkerOp.c_headers` """
return scal.Scalar(self.dtype).c_headers()
def c_libraries(self):
return scal.Scalar(self.dtype).c_libraries()
def c_compile_args(self):
return scal.Scalar(self.dtype).c_compile_args()
def c_support_code(self):
"""Override `CLinkerOp.c_support_code` """
return scal.Scalar(self.dtype).c_support_code()
def c_code_cache_version(self):
scalar_version = scal.Scalar(self.dtype).c_code_cache_version()
if scalar_version:
return (4,) + scalar_version
else:
return ()
# Easy constructors
def tensor(*args, **kwargs):
<CHANGES>
name = kwargs.get('name',None)
<CHANGEE>
return TensorType(*args, **kwargs).make_variable(name=name)
def _multi(*fns):
def f2(f, *names):
if names and isinstance(names[0], int):
if names == 1:
return f()
else:
return [f() for i in xrange(names[0])]
if isinstance(names, tuple):
if len(names) == 1:
names = names[0]
if len(names) == 1:
<FILEE>
<SCANS> rest = inputs[1:]
return [IncSubtensor(self.idx_list)(zeros_like(x), gz, *rest)] + [None] * len(rest)
def __eq__(self, other):
return type(self) == type(other) and self.idx_list == other.idx_list
def __hash__(self):
#TODO: optimize by cache this hash value
msg = []
for entry in self.idx_list:
if isinstance(entry, slice):
msg += [(entry.start, entry.stop, entry.step)]
else:
msg += [entry]
idx_list = tuple(msg)
#backport
#idx_list = tuple((entry.start, entry.stop, entry.step)
# if isinstance(entry, slice)
# else entry
# for entry in self.idx_list)
return hash(idx_list)
@staticmethod
def str_from_slice(entry):
msg = []
for x in [entry.start, entry.stop, entry.step]:
if x is None:
msg.append("")
else:
msg.append(str(x))
return ":".join(msg)
def __str__(self):
indices = []
for entry in self.idx_list:
if isinstance(entry, slice):
indices.append(self.str_from_slice(entry))
else:
indices.append(str(entry))
return "%s{%s}" % (self.__class__.__name__, ", ".join(indices))
class SubtensorPrinter:
def process(self, r, pstate):
if r.owner is None:
raise TypeError("Can only print Subtensor.")
elif isinstance(r.owner.op, Subtensor):
idxs = r.owner.op.idx_list
inputs = list(r.owner.inputs)
input = inputs.pop()
sidxs = []
inbrack_pstate = pstate.clone(precedence = -1000)
for entry in idxs:
if isinstance(entry, int):
sidxs.append(str(entry))
elif isinstance(entry, scal.Scalar):
sidxs.append(inbrack_pstate.pprinter.process(inputs.pop()))
elif isinstance(entry, slice):
if entry.start is None or entry.start==0:
msg1 = ""
else:
msg1 = entry.start
if entry.stop is None or entry.stop == sys.maxint:
msg2 = ""
else:
msg2 = entry.stop
if entry.step is None:
msg3 = ""
else:
msg3 = ":%s" % entry.step
sidxs.append("%s:%s%s" % (msg1, msg2, msg3))
#backport
#sidxs.append("%s:%s%s" % ("" if entry.start is None or entry.start == 0 else entry.start,
# "" if entry.stop is None or entry.stop == sys.maxint else entry.stop,
# "" if entry
<FILEB>
<CHANGES>
self["CurrentTime"] = Clock()
# ServicePosition(self.session.nav, ServicePosition.TYPE_REMAINING)
<CHANGEE>
<FILEE>
<FILEB>
self.state = self.STATE_HIDDEN
self["actions"] = ActionMap( [ "InfobarActions" ],
{
"switchChannelUp": self.switchChannelUp,
"switchChannelDown": self.switchChannelDown,
"mainMenu": self.mainMenu,
"zapUp": self.zapUp,
"zapDown": self.zapDown,
"volumeUp": self.volUp,
"volumeDown": self.volDown,
"volumeMute": self.volMute,
"instantRecord": self.instantRecord,
"hide": self.hide,
"toggleShow": self.toggleShow,
"showMovies": self.showMovies,
"quit": self.quit
})
# self["okbutton"] = Button("mainMenu", [self.mainMenu])
<CHANGES>
self["CurrentTime"] = ServicePosition(self.session.nav, ServicePosition.TYPE_REMAINING)
<CHANGEE>
# Clock()
self["Volume"] = self.volumeBar
self["ServiceName"] = ServiceName(self.session.nav)
self["Event_Now"] = EventInfo(self.session.nav, EventInfo.Now)
self["Event_Next"] = EventInfo(self.session.nav, EventInfo.Next)
self["Event_Now_Duration"] = EventInfo(self.session.nav, EventInfo.Now_Duration)
self["Event_Next_Duration"] = EventInfo(self.session.nav, EventInfo.Next_Duration)
self.recording = None
self.pos = 0
def mainMenu(self):
print "loading mainmenu XML..."
menu = mdom.childNodes[0]
<FILEE>
<SCANS>from Screen import Screen
from ChannelSelection import ChannelSelection
from Components.Clock import Clock
from Components.VolumeBar import VolumeBar
from Components.ActionMap import ActionMap
from Components.Button import Button
from Components.ServiceName import ServiceName
from Components.EventInfo import EventInfo
from Components.ServicePosition import ServicePosition
from Screens.MessageBox import MessageBox
from Screens.MovieSelection import MovieSelection
from enigma import *
import time
# hack alert!
from Menu import MainMenu, mdom
class InfoBar(Screen):
STATE_HIDDEN = 0
STATE_HIDING = 1
STATE_SHOWING = 2
STATE_SHOWN = 3
def __init__(self, session):
Screen.__init__(self, session)
#instantiate forever
self.servicelist = self.session.instantiateDialog(ChannelSelection)
self.volumeBar = VolumeBar()
assert menu.tagName == "menu", "root element in menu must be 'menu'!"
self.session.open(MainMenu, menu, menu.childNodes)
def switchChannelUp(self):
self.servicelist.moveUp()
self.session.execDialog(self.servicelist)
def switchChannelDown(self):
self.servicelist.moveDown()
self.session.execDialog(self.servicelist)
def hide(self):
self.instance.hide()
def toggleShow(self):
if self.state == self.STATE_SHOWN:
# self.instance.hide()
self.startHide()
else:
# self.instance.show()
self.startShow()
def zapUp(self):
self.servicelist.moveUp()
self.servicelist.zap()
def zapDown(self):
self.servicelist.moveDown()
self.servicelist.zap()
def volUp(self):
eDVBVolumecontrol.getInstance().volumeUp()
self.volumeBar.setValue(eDVBVolumecontrol.getInstance().getVolume())
def volDown(self):
eDVBVolumecontrol.getInstance().volumeDown()
self.volumeBar.setValue(eDVBVolumecontrol.getInstance().getVolume())
def startShow(self):
self.instance.m_animation.startMoveAnimation(ePoint(0, 600), ePoint(0, 380), 100)
self.state = self.STATE_SHOWN
def startHide(self):
self.instance.m_animation.startMoveAnimation(ePoint(0, 38
<FILEB>
<CHANGES>
with GLSecureFile(filepath) as f:
<CHANGEE>
<FILEE>
<FILEB>
'status': u'processing',
'path': ifile.file_path,
'size': ifile.size,
'receiver': admin_serialize_receiver(receiver, GLSettings.memory_copy.default_language)
})
return receiverfiles_maps
def fsops_pgp_encrypt(fpath, recipient_pgp):
"""return"""
"""path of encrypted file,"""
"""length of the encrypted file"""
"""this function is used to encrypt a file for a specific recipient."""
"""commonly 'receiver_desc' is expected as second argument;"""
"""anyhow a simpler dict can be used."""
"""required keys are checked on top"""
gpoj = GLBPGP()
try:
gpoj.load_key(recipient_pgp['pgp_key_public'])
filepath = os.path.join(GLSettings.submission_path, fpath)
<CHANGES>
with open(filepath) as f:
<CHANGEE>
encrypted_file_path = os.path.join(os.path.abspath(GLSettings.submission_path), "pgp_encrypted-%s" % generateRandomKey(16))
_, encrypted_file_size = gpoj.encrypt_file(recipient_pgp['pgp_key_fingerprint'], f, encrypted_file_path)
except:
raise
finally:
# the finally statement is always called also if
# except contains a return or a raise
gpoj.destroy_environment()
return encrypted_file_path, encrypted_file_size
def process_files(receiverfiles_maps):
"""@param receiverfiles_maps: the mapping of ifile/rfiles to be created on filesystem"""
"""@return: return None"""
<FILEE>
<SCANS>debug(":( NOT all receivers support PGP and the system allows plaintext version of files: %s saved as plaintext file %s" %
(ifile_path, plain_path))
try:
with open(plain_path, "wb") as plaintext_f, GLSecureFile(ifile_path) as encrypted_file:
chunk_size = 4096
written_size = 0
while True:
chunk = encrypted_file.read(chunk_size)
if len(chunk) == 0:
if written_size != receiverfiles_map['ifile_size']:
log.err("Integrity error on rfile write for ifile %s; ifile_size(%d), rfile_size(%d)" %
(ifile_id, receiverfiles_map['ifile_size'], written_size))
break
written_size += len(chunk)
plaintext_f.write(chunk)
receiverfiles_map['ifile_path'] = plain_path
except Exception as excep:
log.err("Unable to create plaintext file %s: %s" % (plain_path, excep))
else:
log.debug("All Receivers support PGP or the system denies plaintext version of files: marking internalfile as removed")
# the original AES file should always be deleted
log.debug("Deleting the submission AES encrypted file: %s" % ifile_path)
# Remove the AES file
try:
os.remove(ifile_path)
except OSError as ose:
log.err("Unable to remove %s: %s" % (ifile_path, ose.message))
# Remove the AES file key
try:
os.remove(os.path.join(GLSettings.ramdisk_path, ("%s%s" % (GLSettings.AES_keyfile_prefix, ifile_name))))
except OSError as ose:
log.err("Unable to remove keyfile associated with %s: %s" % (ifile_path, ose.message))
@transact
def update_internalfile_and_store_receiverfiles(store, receiverfiles_maps):
for ifile_id, receiverfiles_map in receiverfiles_maps.iteritems():
ifile = store.find(InternalFile, InternalFile.id == ifile_id).one()
if ifile is None:
continue
ifile.new = False
# update filepath possibly changed in case of plaintext file needed
ifile.file_path = receiverfiles_map['ifile_path']
for rf in receiverfiles_map['rfiles']:
rfile = store.find(ReceiverFile, ReceiverFile.id == rf['id']).one()
if rfile is None:
continue
rfile.status = rf['status']
rfile.file_path = rf['path']
rfile.size
<FILEB>
<CHANGES>
"%s=%r" % (k, v) for k, v in sorted(self.items())
<CHANGEE>
<FILEE>
<FILEB>
self.pop(key)
elif value is True:
self[key] = None
else:
self[key] = value
def _del_cache_value(self, key):
"""Used internally by the accessor properties."""
if key in self:
del self[key]
def to_header(self):
"""Convert the stored values into a cache control header."""
return dump_header(self)
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %s>' % (
self.__class__.__name__,
" ".join(
<CHANGES>
"{0}={1!r}".format(k, v) for k, v in sorted(self.iteritems())
<CHANGEE>
),
)
class RequestCacheControl(ImmutableDictMixin, _CacheControl):
"""A cache control for requests. This is immutable and gives access"""
"""to all the request-relevant cache control headers."""
"""To get a header of the :class:`RequestCacheControl` object again you can"""
"""convert the object into a string or call the :meth:`to_header` method. If"""
"""you plan to subclass it and add your own items have a look at the sourcecode"""
"""for that class."""
""".. versionadded:: 0.5"""
"""In previous versions a `CacheControl` class existed that was used"""
"""both for request and response."""
<FILEE>
<SCANS> callable that is used to cast the value in the"""
""":class:`Headers`. If a :exc:`ValueError` is raised"""
"""by this callable the value will be removed from the list."""
""":return: a :class:`list` of all the values for the key."""
""":param as_bytes: return bytes instead of unicode strings."""
ikey = key.lower()
result = []
for k, v in self:
if k.lower() == ikey:
if as_bytes:
v = v.encode('latin1')
if type is not None:
try:
v = type(v)
except ValueError:
continue
result.append(v)
return result
def get_all(self, name):
"""Return a list of all the values for the named field."""
"""This method is compatible with the :mod:`wsgiref`"""
""":meth:`~wsgiref.headers.Headers.get_all` method."""
return self.getlist(name)
def items(self, lower=False):
for key, value in self:
if lower:
key = key.lower()
yield key, value
def keys(self, lower=False):
for key, _ in iteritems(self, lower):
yield key
def values(self):
for _, value in iteritems(self):
yield value
def extend(self, iterable):
"""Extend the headers with a dict or an iterable yielding keys and"""
"""values."""
if isinstance(iterable, dict):
for key, value in iteritems(iterable):
if isinstance(value, (tuple, list)):
for v in value:
self.add(key, v)
else:
self.add(key, value)
else:
for key, value in iterable:
self.add(key, value)
def __delitem__(self, key, _index_operation=True):
if _index_operation and isinstance(key, (integer_types, slice)):
del self._list[key]
return
key = key.lower()
new = []
for k, v in self._list:
if k.lower() != key:
new.append((k, v))
self._list[:] = new
def remove(self, key):
"""Remove a key."""
""":param key: The key to be removed."""
return self.__delitem__(key, _index_operation=False)
def pop(self, key=None, default=_missing):
"""Removes and returns a key or index."""
""":param key: The key to be popped. If this is an integer the item at"""
"""that position is removed, if it's a string the value for"""
"""that key is. If the key is omitted or `None` the last"""
"""item is removed."""
""":return: an item."""
if key is None:
return self._list.pop()
if isinstance(key, integer_types):
return self._list.pop(key)
try:
rv = self[key]
self.remove(key)
except KeyError:
if default is not _
<FILEB>
<CHANGES>
pipeline = Pipeline([('vect', CountVectorizer(min_df=1)),
<CHANGEE>
<FILEE>
<FILEB>
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# simulate iterables
train_data = iter(data[1:-1])
test_data = iter([data[0], data[-1]])
# label junk food as -1, the others as +1
y = np.ones(len(data))
y[:6] = -1
y_train = y[1:-1]
y_test = np.array([y[0], y[-1]])
<CHANGES>
pipeline = Pipeline([('vect', CountVectorizer()),
<CHANGEE>
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('l1', 'l2')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# cross-validation doesn't work if the length of the data is not known,
# hence use lists instead of iterators
pred = grid_search.fit(list(train_data), y_train).predict(list(test_data))
assert_array_equal(pred, y_test)
<FILEE>
<SCANS>get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, min_df=1)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(min_df=1), CountVectorizer(min_df=1)):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# simulate iterables
train_data = iter(data[1:-1])
test_data = iter([data[0], data[-1]])
# label junk food as -1, the others as +1
y = np.ones(len(data))
y[:6] = -1
y_train = y[1:-1]
y_test = np.array([y[0], y[-1]])
pipeline = Pipeline([('vect', TfidfVectorizer(min_df=
<FILEB>
<CHANGES>
formatted_time = str(datetime.timedelta(seconds=int(estimated_time_remaining)))
<CHANGEE>
<FILEE>
<FILEB>
# No validation set, so just assume it's the best so far.
is_best_so_far = True
val_metrics = {}
best_epoch_val_metrics = {}
this_epoch_val_metric = None
self._save_checkpoint(epoch, validation_metric_per_epoch, is_best=is_best_so_far)
self._metrics_to_tensorboard(epoch, train_metrics, val_metrics=val_metrics)
self._metrics_to_console(train_metrics, val_metrics)
if self._learning_rate_scheduler:
# The LRScheduler API is agnostic to whether your schedule requires a validation metric -
# if it doesn't, the validation metric passed here is ignored.
self._learning_rate_scheduler.step(this_epoch_val_metric, epoch)
epoch_elapsed_time = time.time() - epoch_start_time
logger.info("Epoch duration: %s", time.strftime("%H:%M:%S", time.gmtime(epoch_elapsed_time)))
if epoch < self._num_epochs - 1:
training_elapsed_time = time.time() - training_start_time
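# Estimate remaining time from the average epoch duration so far:
# elapsed * (total_epochs / epochs_completed - 1), i.e. per-epoch time times the epochs left.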
estimated_time_remaining = training_elapsed_time * \
((self._num_epochs - epoch_counter) / float(epoch - epoch_counter + 1) - 1)
<CHANGES>
formatted_time = time.strftime("%H:%M:%S", time.gmtime(estimated_time_remaining))
<CHANGEE>
logger.info("Estimated training time remaining: %s", formatted_time)
epochs_trained += 1
training_elapsed_time = time.time() - training_start_time
metrics = {
"training_duration": time.strftime("%H:%M:%S", time.gmtime(training_elapsed_time)),
"training_start_epoch": epoch_counter,
"training_epochs": epochs_trained
}
for key, value in train_metrics.items():
metrics["training_" + key] = value
for key, value in val_metrics.items():
metrics["validation_" + key] = value
<FILEE>
<SCANS> %-10s"
metric_names = set(train_metrics.keys())
if val_metrics:
metric_names.update(val_metrics.keys())
name_length = max([len(x) for x in metric_names])
logger.info(header_template, "Training".rjust(name_length + 13), "Validation")
for name in metric_names:
train_metric = train_metrics.get(name)
val_metric = val_metrics.get(name)
if val_metric is not None and train_metric is not None:
logger.info(dual_message_template, name.ljust(name_length), train_metric, val_metric)
elif val_metric is not None:
logger.info(no_train_message_template, name.ljust(name_length), "N/A", val_metric)
elif train_metric is not None:
logger.info(no_val_message_template, name.ljust(name_length), train_metric, "N/A")
def _validation_loss(self) -> Tuple[float, int]:
"""Computes the validation loss. Returns it and the number of batches."""
logger.info("Validating")
self._model.eval()
if self._validation_iterator is not None:
val_iterator = self._validation_iterator
else:
val_iterator = self._iterator
val_generator = val_iterator(self._validation_data,
num_epochs=1,
shuffle=False,
cuda_device=self._iterator_device)
num_validation_batches = val_iterator.get_num_batches(self._validation_data)
val_generator_tqdm = Tqdm.tqdm(val_generator,
total=num_validation_batches)
batches_this_epoch = 0
val_loss = 0
for batch in val_generator_tqdm:
loss = self._batch_loss(batch, for_training=False)
if loss is not None:
# You shouldn't necessarily have to compute a loss for validation, so we allow for
# `loss` to be None. We need to be careful, though - `batches_this_epoch` is
# currently only used as the divisor for the loss function, so we can safely only
# count those batches for which we actually have a loss. If this variable ever
<FILEB>
<CHANGES>
class CoercionNode(NewTempExprNode):
<CHANGEE>
<FILEE>
<FILEB>
}
def binop_node(pos, operator, operand1, operand2):
# Construct binop node of appropriate class for
# given operator.
return binop_node_classes[operator](pos,
operator = operator,
operand1 = operand1,
operand2 = operand2)
#-------------------------------------------------------------------
#
# Coercion nodes
#
# Coercion nodes are special in that they are created during
# the analyse_types phase of parse tree processing.
# Their __init__ methods consequently incorporate some aspects
# of that phase.
#
#-------------------------------------------------------------------
<CHANGES>
class CoercionNode(ExprNode):
<CHANGEE>
# Abstract base class for coercion nodes.
#
# arg ExprNode node being coerced
subexprs = ['arg']
def __init__(self, arg):
self.pos = arg.pos
self.arg = arg
if debug_coercion:
print("%s Coercing %s" % (self, self.arg))
def annotate(self, code):
self.arg.annotate(code)
if self.arg.type != self.type:
<FILEE>
<SCANS>, env):
self.arg.analyse_types(env)
self.arg = self.arg.coerce_to_pyobject(env)
self.type = py_object_type
self.gil_check(env)
self.is_temp = 1
gil_message = "Backquote expression"
def generate_result_code(self, code):
code.putln(
"%s = PyObject_Repr(%s); %s" % (
self.result(),
self.arg.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
class ImportNode(ExprNode):
# Used as part of import statement implementation.
# Implements result =
# __import__(module_name, globals(), None, name_list)
#
# module_name IdentifierStringNode dotted name of module
# name_list ListNode or None list of names to be imported
subexprs = ['module_name', 'name_list']
def analyse_types(self, env):
self.module_name.analyse_types(env)
self.module_name = self.module_name.coerce_to_pyobject(env)
if self.name_list:
self.name_list.analyse_types(env)
self.name_list.coerce_to_pyobject(env)
self.type = py_object_type
self.gil_check(env)
self.is_temp = 1
env.use_utility_code(import_utility_code)
gil_message = "Python import"
def generate_result_code(self, code):
if self.name_list:
name_list_code = self.name_list.py_result()
else:
name_list_code = "0"
code.putln(
"%s = __Pyx_Import(%s, %s); %s" % (
self.result(),
self.module_name.py_result(),
name_list_code,
code.error_goto_if_null(self.result(), self.pos)))
class IteratorNode(NewTempExprNode):
# Used as part of for statement implementation.
#
# allocate_counter_temp/release_counter_temp needs to be called
# by parent (ForInStatNode)
#
# Implements result = iter(sequence)
#
# sequence ExprNode
subexprs = ['sequence']
def analyse_types(self, env):
self.sequence.analyse_types(env)
self.sequence = self.sequence.coerce_to_pyobject(env)
self.type = py_object_type
self.gil_check(env)
self.is_temp = 1
gil_message = "Iterating over Python object"
def allocate_counter_temp(self, code):
self.counter_cname = code.funcstate.allocate_temp(
PyrexTypes.c_py_ssize_t_type, manage_ref=False)
def release_counter_temp(self, code):
code.funcstate.release_temp(self.counter_cname)
def generate_result_code(self, code):
is_builtin_sequence = self.sequence.type is list_type
<FILEB>
<CHANGES>
db = db_connect()
<CHANGEE>
<FILEE>
<FILEB>
""""message": - operation result description"""
""""match_id": - match_id of match_report"""
"""}"""
try:
match_id = None
if type(data).__name__ == 'str':
data = parse_stats_submission( data )
if is_instagib(data):
data["game_meta"]["G"] = "i" + data["game_meta"]["G"]
if is_tdm2v2(data):
data["game_meta"]["G"] = "tdm2v2"
match_id = data["game_meta"]["I"]
if data["game_meta"]["G"] not in GAMETYPE_IDS:
return {
"ok": False,
"message": "gametype is not accepted: " + data["game_meta"]["G"],
"match_id": match_id
}
<CHANGES>
db = db.connect()
<CHANGEE>
cu = db.cursor()
team_scores = [None, None]
team_index = -1
for team_data in data["teams"]:
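# Team labels arrive as "team#1"/"team#2"; strip the prefix to get a 0-based index.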
team_index = int( team_data["Q"].replace("team#", "") ) - 1
for key in ["scoreboard-rounds", "scoreboard-caps", "scoreboard-score"]:
if key in team_data:
team_scores[team_index] = int(team_data[key])
team1_score, team2_score = team_scores
match_timestamp = int( data["game_meta"]["1"] )
cu.execute("INSERT INTO matches (match_id, gametype_id, factory_id, map_id, timestamp, duration, team1_score, team2_score, post_processed) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)", [
match_id,
<FILEE>
<SCANS> m.gametype_id, s.old_mean'''
'''FROM'''
'''matches m'''
'''LEFT JOIN scoreboards s ON s.match_id = m.match_id'''
'''WHERE'''
'''s.old_mean IS NOT NULL AND'''
'''s.steam_id = %s AND'''
'''m.gametype_id = %s'''
'''ORDER BY m.timestamp DESC'''
'''LIMIT 50'''
''') m ON m.gametype_id = g.gametype_id'''
'''LEFT JOIN (''' + SQL_TOP_PLAYERS_BY_GAMETYPE + ''') rt ON rt.steam_id = p.steam_id'''
'''WHERE'''
'''p.steam_id = %s AND'''
'''g.gametype_id = %s'''
'''ORDER BY m.timestamp ASC'''
cu.execute(query, [steam_id, gametype_id, LAST_GAME_TIMESTAMPS[ gametype_id ]-KEEPING_TIME, gametype_id, steam_id, gametype_id])
for row in cu.fetchall():
result[ "_id" ] = str(row[0])
result[ "name" ] = row[1]
result[ "model" ] = row[2]
if gametype not in result and row[4] != None:
result[ gametype ] = {"rating": round(row[4], 2), "n": row[5], "history": [], "rank": row[9], "max_rank": row[10]}
if row[8] != None:
result[ gametype ][ "history" ].append({"match_id": row[6], "timestamp": row[7], "rating": round(row[8], 2)})
result = {
"ok": True,
"player": result
}
except Exception as e:
db.rollback()
traceback.print_exc(file=sys.stderr)
result = {
"ok": False,
"message": type(e).__name__ + ": " + str(e)
}
finally:
cu.close()
db.close()
return result
def get_player_info2( steam_id ):
result = {}
try:
db = db_connect()
cu = db.cursor()
# player name, rating and games played
cu.execute('''SELECT p.name, gr.mean, gr.n, g.gametype_short, g.gametype_name'''
'''FROM players p'''
'''LEFT JOIN gametype_ratings gr ON p.steam_id = gr.steam_id'''
'''LEFT JOIN gametypes g ON g.gametype_
<FILEB>
<CHANGES>
username = yield self.authenticator.get_authenticated_user(self, None)
<CHANGEE>
<FILEE>
<FILEB>
self.log.info('redirect_uri: %r', redirect_uri)
self.authorize_redirect(
redirect_uri=redirect_uri,
client_id=self.authenticator.client_id,
scope=['openid', 'email'],
response_type='code')
class GoogleOAuthHandler(OAuthCallbackHandler, GoogleOAuth2Mixin):
@gen.coroutine
def get(self):
self.settings['google_oauth'] = {
'key': self.authenticator.client_id,
'secret': self.authenticator.client_secret,
'scope': ['openid', 'email']
}
self.log.debug('google: settings: "%s"', str(self.settings['google_oauth']))
# FIXME: we should verify self.settings['google_oauth']['hd']
# "Cannot redirect after headers have been written" ?
#OAuthCallbackHandler.get(self)
<CHANGES>
username = yield self.authenticator.get_authenticated_username(self, None)
<CHANGEE>
self.log.info('google: username: "%s"', username)
if username:
user = self.user_from_username(username)
self.set_login_cookie(user)
self.redirect(url_path_join(self.hub.server.base_url, 'home'))
else:
# todo: custom error
raise HTTPError(403)
class GoogleOAuthenticator(OAuthenticator, GoogleOAuth2Mixin):
login_handler = GoogleLoginHandler
callback_handler = GoogleOAuthHandler
hosted_domain = Unicode(
<FILEE>
<SCANS>"""Custom Authenticator to use Google OAuth with JupyterHub."""
"""Derived from the GitHub OAuth authenticator."""
import os
import json
from tornado import gen
from tornado.auth import GoogleOAuth2Mixin
from tornado.web import HTTPError
from traitlets import Unicode
from jupyterhub.auth import LocalAuthenticator
from jupyterhub.utils import url_path_join
from .oauth2 import OAuthLoginHandler, OAuthCallbackHandler, OAuthenticator
class GoogleLoginHandler(OAuthLoginHandler, GoogleOAuth2Mixin):
'''An OAuthLoginHandler that provides scope to GoogleOAuth2Mixin's'''
'''authorize_redirect.'''
def get(self):
guess_uri = '{proto}://{host}{path}'.format(
proto=self.request.protocol,
host=self.request.host,
path=url_path_join(
self.hub.server.base_url,
'oauth_callback'
)
)
redirect_uri = self.authenticator.oauth_callback_url or guess_uri
os.environ.get('HOSTED_DOMAIN', ''),
config=True,
help="""Hosted domain used to restrict sign-in, e.g. mycollege.edu"""
)
login_service = Unicode(
os.environ.get('LOGIN_SERVICE', 'Google'),
config=True,
help="""Google Apps hosted domain string, e.g. My College"""
)
@gen.coroutine
def authenticate(self, handler, data=None):
code = handler.get_argument('code', False)
if not code:
raise HTTPError(400, "oauth callback made without a token")
if not self.oauth_callback_url:
raise HTTPError(500, "No callback URL")
user = yield handler.get_authenticated_user(
redirect_uri=self.oauth_callback_url,
code=code)
access_token = str(user['access_token'])
http_client = handler.get_auth_http_client()
response = yield http_client.fetch(
self._OAUTH_USERINFO_URL + '?access_token=' + access_token
)
if not response:
self.clear_all_cookies()
raise HTTPError(500, 'Google authentication failed')
body = response.body.decode()
self.log.debug('response.body.decode(): {}'.format(body))
bodyjs = json.loads(body)
username = bodyjs['email']
if self.hosted_domain:
if not username.endswith('@'+self.hosted_domain) or \
bodyjs['hd'] != self.hosted_domain:
raise HTTPError(403,
"You are not signed in to your {} account.".format(
self.hosted_domain)
)
else:
username = username.split('@')[0]
return username
class LocalGoogleOAuthenticator(LocalAuthenticator, GoogleOAuthenticator):
"""A version that mixes in local system user creation"""
<FILEB>
<CHANGES>
except HierarchyRequestErr:
<CHANGEE>
<FILEE>
<FILEB>
def testTextNodeRepr(): pass
def testWriteXML():
str = '<a b="c"/>'
dom = parseString(str)
domstr = dom.toxml()
dom.unlink()
confirm(str == domstr)
confirm(len(Node.allnodes) == 0)
def testProcessingInstruction(): pass
def testProcessingInstructionRepr(): pass
def testTextRepr(): pass
def testWriteText(): pass
def testDocumentElement(): pass
def testTooManyDocumentElements():
doc = parseString("<doc/>")
elem = doc.createElement("extra")
try:
doc.appendChild(elem)
<CHANGES>
except TypeError:
<CHANGEE>
print "Caught expected exception when adding extra document element."
else:
print "Failed to catch expected exception when" \
" adding extra document element."
elem.unlink()
doc.unlink()
def testCreateElementNS(): pass
def testCreateAttributeNS(): pass
def testParse(): pass
def testParseString(): pass
def testComment(): pass
def testAttrListItem(): pass
<FILEE>
<SCANS># test for xml.dom.minidom
from xml.dom.minidom import parse, Node, Document, parseString
from xml.dom import HierarchyRequestErr
import xml.parsers.expat
import os.path
import sys
import traceback
from test_support import verbose
if __name__ == "__main__":
base = sys.argv[0]
else:
base = __file__
tstfile = os.path.join(os.path.dirname(base), "test.xml")
del base
def confirm(test, testname = "Test"):
if test:
print "Passed " + testname
else:
print "Failed " + testname
raise Exception
Node._debug = 1
def testParseFromFile():
from StringIO import StringIO
dom = parse(StringIO(open(tstfile).read()))
dom.unlink()
confirm(isinstance(dom,Document))
def testGetElementsByTagName():
dom = parse(tstfile)
confirm(dom.getElementsByTagName("LI") == \
dom.documentElement.getElementsByTagName("LI"))
dom.unlink()
def testInsertBefore():
dom = parseString("<doc><foo/></doc>")
root = dom.documentElement
elem = root.childNodes[0]
nelem = dom.createElement("element")
root.insertBefore(nelem, elem)
confirm(len(root.childNodes) == 2
and root.childNodes[0] is nelem
and root.childNodes[1] is elem
and root.firstChild is nelem
and root.lastChild is elem
and root.toxml() == "<doc><element/><foo/></doc>"
, "testInsertBefore -- node properly placed in tree")
nelem = dom.createElement("element")
root.insertBefore(nelem, None)
confirm(len(root.childNodes) == 3
and root.childNodes[1] is elem
and root.childNodes[2] is nelem
and root.lastChild is nelem
and nelem.previousSibling is elem
and root.toxml() == "<doc><element/><foo/><element/></doc>"
, "testInsertBefore -- node properly placed in tree")
nelem2 = dom.createElement("bar")
root.insertBefore(nelem2, nelem)
confirm(len(root.childNodes) == 4
and root.childNodes[2] is nelem2
and root.childNodes[3] is nelem
and nelem2.nextSibling is nelem
and nelem.previousSibling is nelem2
and root.toxml() == "<doc><element/><foo/><bar/><element/></doc>"
, "testInsertBefore -- node properly placed in tree")
dom.unlink()
def testAppendChild():
dom = parse(tstfile)
dom.documentElement.appendChild(dom.createComment(u"Hello"))
confirm(dom.documentElement.childNodes[-1].nodeName == "#comment")
confirm(dom.documentElement.childNodes[-1].data == "Hello")
dom.unlink()
def testLegalChildren():
dom = Document()
elem = dom.createElement('element')
text = dom.createTextNode('text')
try: dom.appendChild(text)
except HierarchyRequestErr: pass
else:
print "dom.appendChild didn't raise HierarchyRequestErr"
dom.appendChild(elem)
try: dom.insertBefore(text, elem)
except HierarchyRequestErr: pass
else:
print "dom.appendChild didn't raise HierarchyRequestErr"
try: dom.replaceChild(text, elem)
except
<FILEB>
<CHANGES>
self.openMultiServiceEPG()
#self.openGraphEPG()
<CHANGEE>
<FILEE>
<FILEB>
return None
def InfoPressed(self):
if self.secondInfoBarScreen and self.secondInfoBarScreen.shown:
self.secondInfoBarScreen.hide()
self.secondInfoBarWasShown = False
if config.misc.boxtype.getValue().startswith('et') or config.misc.boxtype.getValue().startswith('odin') or config.misc.boxtype.getValue().startswith('venton') or config.misc.boxtype.getValue().startswith('tm') or config.misc.boxtype.getValue().startswith('gb') or getBoxType().startswith('xp1000'):
self.openEventView()
else:
self.showDefaultEPG()
def IPressed(self):
if self.secondInfoBarScreen and self.secondInfoBarScreen.shown:
self.secondInfoBarScreen.hide()
self.secondInfoBarWasShown = False
self.openEventView()
def EPGPressed(self):
if self.secondInfoBarScreen and self.secondInfoBarScreen.shown:
self.secondInfoBarScreen.hide()
self.secondInfoBarWasShown = False
<CHANGES>
self.openGraphEPG()
<CHANGEE>
def showEventInfoWhenNotVisible(self):
if self.secondInfoBarScreen and self.secondInfoBarScreen.shown:
self.secondInfoBarScreen.hide()
self.secondInfoBarWasShown = False
if self.shown:
self.openEventView()
else:
self.toggleShow()
return 1
def zapToService(self, service, bouquet=None):
if service is not None:
if bouquet:
<FILEE>
<SCANS> = epgcall
self.session.openWithCallback(self.onBouquetSelectorClose, EPGBouquetSelector, self.bouquets, self.epg_bouquet)
def onBouquetSelectorClose(self, bouquet):
if bouquet:
services = self.getBouquetServices(bouquet)
if len(services):
self.epg_bouquet = bouquet
self.epg.setServices(services)
self.epg.setTitle(ServiceReference(self.epg_bouquet).getServiceName())
def closed(self, ret=False):
closedScreen = self.dlg_stack.pop()
if self.bouquetSel and closedScreen == self.bouquetSel:
self.bouquetSel = None
elif self.eventView and closedScreen == self.eventView:
self.eventView = None
if ret:
dlgs=len(self.dlg_stack)
if dlgs > 0:
self.dlg_stack[dlgs-1].close(dlgs > 1)
def openMultiServiceEPG(self, withCallback=True):
self.EPGtype = "multi"
Servicelist = self.servicelist
self.StartBouquet = Servicelist
if config.epgselection.showbouquet_multi.getValue():
self.bouquets = self.servicelist.getBouquetList()
if self.bouquets is None:
cnt = 0
else:
cnt = len(self.bouquets)
if cnt > 1: # show bouquet list
if withCallback:
self.bouquetSel = self.session.openWithCallback(self.closed, BouquetSelector, self.bouquets, self.openBouquetEPG, enableWrapAround=True)
self.dlg_stack.append(self.bouquetSel)
else:
self.bouquetSel = self.session.open(BouquetSelector, self.bouquets, self.openBouquetEPG, enableWrapAround=True)
elif cnt == 1:
self.openBouquetEPG(self.bouquets[0][1], withCallback, self.StartBouquet)
else:
self.bouquets = Servicelist and self.servicelist.getBouquetList()
self.epg_bouquet = Servicelist and Servicelist.getRoot()
if self.epg_bouquet is not None
<FILEB>
<CHANGES>
username=user_info['screen_name'])
<CHANGEE>
<FILEE>
<FILEB>
"""Actually setup/login an account relating to a twitter user after the oauth"""
"""process is finished successfully"""
client = OAuthTwitter(
request, settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET_KEY,
settings.TWITTER_REQUEST_TOKEN_URL,
)
user_info = client.get_user_info()
if request.user.is_authenticated():
# Handling already logged in users connecting their accounts
try:
profile = TwitterProfile.objects.get(twitter_id=user_info['id'])
except TwitterProfile.DoesNotExist: # There can only be one profile!
profile = TwitterProfile.objects.create(twitter_id=user_info['id'])
return HttpResponseRedirect(_get_next(request))
user = authenticate(twitter_id=user_info['id'])
if user is None:
profile = TwitterProfile(twitter_id=user_info['id'],
<CHANGES>
screen_name=user_info['screen_name'])
<CHANGEE>
user = User()
request.session['socialregistration_profile'] = profile
request.session['socialregistration_user'] = user
request.session['next'] = _get_next(request)
return HttpResponseRedirect(reverse('socialregistration_setup'))
if not user.is_active:
return render_to_response(
account_inactive_template,
extra_context,
context_instance=RequestContext(request)
)
login(request, user)
<FILEE>
<SCANS>Actually setup/login an account relating to a hyves user after the oauth """
"""process is finished successfully"""
client = OAuthHyves(
request, settings.HYVES_CONSUMER_KEY,
settings.HYVES_CONSUMER_SECRET_KEY,
settings.HYVES_REQUEST_TOKEN_URL,
)
user_info = client.get_user_info()
user = authenticate(hyves_id=user_info['id'])
if user is None:
profile = HyvesProfile(hyves_id=user_info['id'],
username=user_info['screen_name'],
avatar=user_info['avatar'],
url=user_info['url'],
)
user = User()
request.session['socialregistration_profile'] = profile
request.session['socialregistration_user'] = user
request.session['next'] = _get_next(request)
return HttpResponseRedirect(reverse('socialregistration_setup'))
else:
try:
profile = HyvesProfile.objects.get(user=user)
except HyvesProfile.DoesNotExist:
pass
else:
profile.avatar = user_info['avatar']
profile.save()
login(request, user)
request.user.message_set.create(message=_('You have succesfully been logged in with your hyves account'))
return HttpResponseRedirect(_get_next(request))
def linkedin(request):
"""Actually setup/login an account relating to a linkedin user after the oauth """
"""process is finished successfully"""
client = OAuthLinkedin(
request, settings.LINKEDIN_CONSUMER_KEY,
settings.LINKEDIN_CONSUMER_SECRET_KEY,
settings.LINKEDIN_REQUEST_TOKEN_URL,
)
user_info = client.get_user_info()
user = authenticate(linkedin_id=user_info['id'])
if user is None:
profile = LinkedinProfile(linkedin_id=user_info['id'],
username=user_info['screen_name'],
)
user = User()
request.session['socialregistration_profile'] = profile
request.session['socialregistration_user'] = user
request.session['next'] = _get_next(request)
return HttpResponseRedirect(reverse('socialregistration_setup'))
login(request, user)
request.user.message_set.create(message=_('You have succesfully been logged in with your linkedin account'))
return HttpResponseRedirect(_get_next(request))
def oauth_redirect(request, consumer_key=None, secret_key=None,
request_token_url=None, access_token_url=None, authorization_url=None,
callback_url=None, parameters=None):
"""View to handle the OAuth based authentication redirect to the service provider"""
request.session['next'] = _get_next(request)
client = OAuthClient(request, consumer_key, secret_key,
request_token_url, access_token_url, authorization_url
<FILEB>
<CHANGES>
self._append_html(text)
<CHANGEE>
<FILEE>
<FILEB>
re.match("(?:[^\n]*\n){%i}" % minlines, text):
if self.paging == 'custom':
self.custom_page_requested.emit(text)
else:
self._page_control.clear()
cursor = self._page_control.textCursor()
if html:
self._insert_html(cursor, text)
else:
self._insert_plain_text(cursor, text)
self._page_control.moveCursor(QtGui.QTextCursor.Start)
self._page_control.viewport().resize(self._control.size())
if self._splitter:
self._page_control.show()
self._page_control.setFocus()
else:
self.layout().setCurrentWidget(self._page_control)
elif html:
<CHANGES>
self._append_plain_html(text)
<CHANGEE>
else:
self._append_plain_text(text)
def _prompt_finished(self):
"""Called immediately after a prompt is finished, i.e. when some input"""
"""will be processed and a new prompt displayed."""
self._control.setReadOnly(True)
self._prompt_finished_hook()
def _prompt_started(self):
"""Called immediately after a new prompt is displayed."""
# Temporarily disable the maximum block count to permit undo/redo and
# to ensure that the prompt position does not change due to truncation.
self._control.document().setMaximumBlockCount(0)
<FILEE>
<SCANS> is one."""
# Determine where to insert the content.
cursor = self._control.textCursor()
if before_prompt and (self._reading or not self._executing):
cursor.setPosition(self._append_before_prompt_pos)
else:
cursor.movePosition(QtGui.QTextCursor.End)
start_pos = cursor.position()
# Perform the insertion.
result = insert(cursor, input)
# Adjust the prompt position if we have inserted before it. This is safe
# because buffer truncation is disabled when not executing.
if before_prompt and not self._executing:
diff = cursor.position() - start_pos
self._append_before_prompt_pos += diff
self._prompt_pos += diff
return result
def _append_html(self, html, before_prompt=False):
"""Appends HTML at the end of the console buffer."""
self._append_custom(self._insert_html, html, before_prompt)
def _append_html_fetching_plain_text(self, html, before_prompt=False):
"""Appends HTML, then returns the plain text version of it."""
return self._append_custom(self._insert_html_fetching_plain_text,
html, before_prompt)
def _append_plain_text(self, text, before_prompt=False):
"""Appends plain text, processing ANSI codes if enabled."""
self._append_custom(self._insert_plain_text, text, before_prompt)
def _cancel_text_completion(self):
"""If text completion is progress, cancel it."""
if self._text_completing_pos:
self._clear_temporary_buffer()
self._text_completing_pos = 0
def _clear_temporary_buffer(self):
"""Clears the "temporary text" buffer, i.e. all the text following"""
"""the prompt region."""
# Select and remove all text below the input buffer.
cursor = self._get_prompt_cursor()
prompt = self._continuation_prompt.lstrip()
while cursor.movePosition(QtGui.QTextCursor.NextBlock):
temp_cursor = QtGui.QTextCursor(cursor)
temp_cursor.select(QtGui.QTextCursor.BlockUnderCursor)
text = temp_cursor.selection().toPlainText().lstrip()
if not text.startswith(prompt):
break
else:
# We've reached the end of the input buffer and no text follows.
return
cursor.movePosition(QtGui.QTextCursor.Left) # Grab the newline.
cursor.movePosition(QtGui.QTextCursor.End,
QtGui.QTextCursor.KeepAnchor)
cursor.removeSelectedText()
# After doing this, we have no choice but to clear the undo/redo
# history. Otherwise, the text is not "temporary" at all, because it
# can be recalled with undo/redo. Unfortunately, Qt does not expose
# fine-grained control to the undo/redo system.
if self._control.isUndoRedoEnabled():
<FILEB>
<CHANGES>
mn.master = self
<CHANGEE>
<FILEE>
<FILEB>
return defer.succeed([{"buildername":"Builder", "brid":1}])
builder = Mock()
builder.getBuild = fakeGetBuild
builder.name = "Builder"
build = FakeBuildStatus()
build.results = SUCCESS
build.finished = True
build.reason = "testReason"
build.getBuilder.return_value = builder
self.db = fakedb.FakeDBConnector(self)
self.db.insertTestData([fakedb.SourceStampSet(id=127),
fakedb.Buildset(id=99, sourcestampsetid=127,
results=SUCCESS,
reason="testReason"),
fakedb.BuildRequest(id=11, buildsetid=99,
buildername='Builder'),
fakedb.Build(number=0, brid=11),
])
<CHANGES>
mn.parent = self
<CHANGEE>
self.status = Mock()
mn.master_status = Mock()
mn.master_status.getBuilder = fakeGetBuilder
mn.buildMessageDict = Mock()
mn.buildMessageDict.return_value = {"body":"body", "type":"text",
"subject":"subject"}
mn.buildsetFinished(99, FAILURE)
self.assertFalse(fakeBuildMessage.called)
def test_buildFinished_ignores_unspecified_categories(self):
mn = MailNotifier('from@example.org', categories=['fast'])
build = FakeBuildStatus(name="build")
build.builder = Mock()
<FILEE>
<SCANS>changeid=9123, uid=1),
fakedb.ChangeUser(changeid=9124, uid=2),
fakedb.User(uid=1, identifier="tdurden"),
fakedb.User(uid=2, identifier="user2"),
fakedb.UserInfo(uid=1, attr_type='email',
attr_data="tyler@mayhem.net"),
fakedb.UserInfo(uid=2, attr_type='email',
attr_data="user2@example.net")
])
def _getInterestedUsers():
# 'narrator' in this case is the owner, which tests the lookup
return ["Big Bob <bob@mayhem.net>", "narrator"]
build1.getInterestedUsers = _getInterestedUsers
build2.getInterestedUsers = _getInterestedUsers
def _getResponsibleUsers():
return ["Big Bob <bob@mayhem.net>"]
build1.getResponsibleUsers = _getResponsibleUsers
build2.getResponsibleUsers = _getResponsibleUsers
# fake sourcestamp with relevant user bits
ss1 = Mock(name="sourcestamp")
fake_change1 = Mock(name="change")
fake_change1.number = 9123
ss1.changes = [fake_change1]
ss1.patch, ss1.addPatch = None, None
ss2 = Mock(name="sourcestamp")
fake_change2 = Mock(name="change")
fake_change2.number = 9124
ss2.changes = [fake_change2]
ss2.patch, ss1.addPatch = None, None
def fakeGetSS(ss):
return lambda: ss
build1.getSourceStamp = fakeGetSS(ss1)
build2.getSourceStamp = fakeGetSS(ss2)
mn.master = self # FIXME: Should be FakeMaster
self.status = mn.master_status = mn.buildMessageDict = Mock()
mn.master_status.getBuilder = fakeGetBuilder
mn.buildMessageDict.return_value = {"body": "body", "type": "text"}
mn.buildMessage(builder.name, [build1, build2], build1.result)
self.assertEqual(m['To'], "tyler@mayhem.net, user2@example.net")
def create_msgdict():
unibody = u'Unicode body with non-ascii (\u00E5\u00E4\u00F6).'
msg_dict = dict(body=unibody, type='plain')
return msg_dict
<FILEB>
<CHANGES>
except botocore.exceptions.ClientError as e:
<CHANGEE>
<FILEE>
<FILEB>
if bucket.startswith("s3://"):
bucket = bucket[5:]
key = args.upload_location[bucket_end + 1:]
bams = []
for fastq in args.input_fastq:
fq_bucket_end = fastq.find('/')
fq_bucket = fastq[:fq_bucket_end]
if fq_bucket.startswith("s3://"):
fq_bucket = fq_bucket[5:]
fq1 = fastq[fq_bucket_end + 1:]
fq2 = fq1[:-15] + "2.filt.fastq.gz"
read_group_id = os.path.basename(fq1)[:-16]
# Get alignments #
bam_key = args.bam_key.format(sample=args.sample_name, run=read_group_id)
bam_in_s3 = False
s3_bam = s3.Object(bucket, bam_key)
try:
s3_bam.load()
<CHANGES>
except boocore.exceptions.ClientError as e:
<CHANGEE>
if e.response["Error"]["Code"] == "404":
pass
else:
raise e
else:
bam_in_s3 = True
if bam_in_s3:
# Alignments are already present in s3, just download #
bam_out = "/ephemeral/{read_group}_sorted.bam".format(read_group=read_group_id)
logging.info("Downloading {} to {}".format(bucket + '/' + bam_key, bam_out))
s3_bam.download_file(bam_out)
logging.info("Downloading {} to {}".format(bucket + '/' + bam_key + ".bai", bam_out + ".bai"))
<FILEE>
<SCANS>#!/usr/bin/env python3
import argparse
import boto3
import botocore
import os.path
import logging
import subprocess
import os
align_cmd = '''bwa mem -t {threads} -R '{read_group}' {ref} {fq1} {fq2} | '''
'''samblaster | '''
'''samtools view -b -u /dev/stdin |'''
'''samtools sort -@ {threads} -m {mem} -O BAM -o {out} /dev/stdin'''
index_cmd = '''samtools index {bam}'''
call_vars_cmd = '''java -Xmx{mem} -jar {gatk} -T HaplotypeCaller -R {ref} \'''
'''{input} -o {out} --emitRefConfidence GVCF \'''
'''--variant_index_type LINEAR --variant_index_parameter 128000 \'''
'''-G StandardAnnotation -A AlleleBalance -A TandemRepeatAnnotator \'''
'''-A ClippingRankSumTest -A GCContent -A MappingQualityZero \'''
'''-A SpanningDeletions -A StrandOddsRatio -A AlleleBalanceBySample '''
def download_and_align(s3, bucket, fq1, fq2, sample, read_group_id, threads, ref, mem):
fq1_local = "/ephemeral/" + os.path.basename(fq1)
fq2_local = "/ephemeral/" + os.path.basename(fq2)
logging.info("Downloading {} to {}".format(bucket + '/' + fq1, fq1_local))
s3.Object(bucket, fq1).download_file(fq1_local)
logging.info("Downloading {} to {}".format(bucket + '/' + fq2, fq2_local))
s3.Object(bucket, fq2).download_file(fq2_local)
cmd = align_cmd
cmd = cmd.format(
threads = threads,
read_group = r"@RG\tID:{}\tSM:{}".format(read_group_id, sample),
ref = ref,
fq1 = fq1_local,
fq2 = fq2_local,
mem = mem,
out = "/ephemeral/{read_group}_sorted.bam".format(read_group=read_group_id))
logging.info("Running alignment: {}".format(cmd))
subprocess.check_call(cmd, shell=True)
logging.info("Removing {} and {}".format(fq1_local, fq2_local))
os.remove(fq1_local)
os.remove(fq2_local)
return "/ephemeral/{read
<FILEB>
<CHANGES>
for article in chain(self.articles, self.translations):
<CHANGEE>
<FILEE>
<FILEB>
self.settings['FEED_RSS'], feed_type='rss')
for cat, arts in self.categories.items():
arts.sort(key=attrgetter('date'), reverse=True)
writer.write_feed(arts, self.context,
self.settings['CATEGORY_FEED'] % cat)
if 'CATEGORY_FEED_RSS' in self.settings:
writer.write_feed(arts, self.context,
self.settings['CATEGORY_FEED_RSS'] % cat,
feed_type='rss')
if 'TAG_FEED' in self.settings:
for tag, arts in self.tags.items():
arts.sort(key=attrgetter('date'), reverse=True)
writer.write_feed(arts, self.context,
self.settings['TAG_FEED'] % tag)
if 'TAG_FEED_RSS' in self.settings:
writer.write_feed(arts, self.context,
self.settings['TAG_FEED_RSS'] % tag, feed_type='rss')
translations_feeds = defaultdict(list)
<CHANGES>
for article in self.translations:
<CHANGEE>
translations_feeds[article.lang].append(article)
for lang, items in translations_feeds.items():
items.sort(key=attrgetter('date'), reverse=True)
writer.write_feed(items, self.context,
self.settings['TRANSLATION_FEED'] % lang)
def generate_pages(self, writer):
"""Generate the pages on the disk"""
"""TODO: change the name"""
templates = self.get_templates()
write = writer.write_file
for template in _DIRECT_TEMPLATES:
write('%s.html' % template, templates[template], self.context,
<FILEE>
<SCANS> = [] # only articles in default language
self.translations = []
self.dates = {}
self.tags = {}
self.categories = {}
super(ArticlesGenerator, self).__init__(*args, **kwargs)
def generate_feeds(self, writer):
"""Generate the feeds from the current context, and output files."""
writer.write_feed(self.articles, self.context, self.settings['FEED'])
if 'FEED_RSS' in self.settings:
writer.write_feed(self.articles, self.context,
blog=True)
for tag, articles in self.tags.items():
write('tag/%s.html' % tag, templates['tag'], self.context, tag=tag,
articles=articles)
for cat in self.categories:
write('category/%s.html' % cat, templates['category'], self.context,
category=cat, articles=self.categories[cat])
for article in chain(self.translations, self.articles):
write(article.save_as,
templates['article'], self.context, article=article,
category=article.category)
def generate_context(self):
"""change the context"""
# return the list of files to use
files = self.get_files(self.path, exclude=['pages',])
all_articles = []
for f in files:
content, metadatas = read_file(f)
# if no category is set, use the name of the path as a category
if 'category' not in metadatas.keys():
category = os.path.dirname(f).replace(
os.path.expanduser(self.path)+'/', '')
if category == self.path:
category = self.settings['DEFAULT_CATEGORY']
if category != '':
metadatas['category'] = unicode(category)
if 'date' not in metadatas.keys()\
and self.settings['FALLBACK_ON_FS_DATE']:
metadatas['date'] = datetime.fromtimestamp(os.stat(f).st_ctime)
article = Article(content, metadatas, settings=self.settings,
filename=f)
if not is_valid_content(article, f):
continue
if hasattr(article, 'tags'):
for tag in article.tags:
update_dict(self.tags, tag, article)
all_articles.append(article)
self.articles, self.translations = process_translations(all_articles)
for article in self.articles:
# only main articles are listed in categories, not translations
update_dict(self.categories, article.category, article)
# sort the articles by date
self.articles.sort(key=attrgetter('date'), reverse=True)
self.dates = list(self.articles
<FILEB>
<CHANGES>
except (HTTPError, ConnectionError) as e:
<CHANGEE>
<FILEE>
<FILEB>
else:
data = {'tconst': 'tt0133093'}
list_data = self._session.post('http://www.imdb.com/list/_ajax/wlb_dropdown', data=data).json()
for li in list_data['items']:
if li['wlb_text'] == self.config['list']:
self.list_id = li['data_list_id']
break
else:
raise plugin.PluginError('Could not find list %s' % self.config['list'])
self._authenticated = True
def invalidate_cache(self):
self._items = None
@property
def items(self):
if self._items is None:
try:
r = self.session.get('http://www.imdb.com/list/export?list_id=%s&author_id=%s' %
(self.list_id, self.user_id))
<CHANGES>
except HTTPError as e:
<CHANGEE>
raise PluginError(e.args[0])
lines = r.iter_lines()
# Throw away first line with headers
next(lines)
self._items = []
for row in csv.reader(lines):
row = [unicode(cell, 'utf-8') for cell in row]
log.debug('parsing line from csv: %s', ', '.join(row))
if not len(row) == 16:
log.debug('no movie row detected, skipping. %s', ', '.join(row))
continue
entry = Entry({
<FILEE>
<SCANS>from __future__ import unicode_literals, division, absolute_import
import csv
import logging
import re
from collections import MutableSet
from datetime import datetime
from requests.exceptions import ConnectionError, HTTPError
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.plugin import PluginError
from flexget.utils.requests import Session, TimedLimiter
from flexget.utils.soup import get_soup
log = logging.getLogger('imdb_list')
IMMUTABLE_LISTS = ['ratings', 'checkins']
class ImdbEntrySet(MutableSet):
schema = {
'type': 'object',
'properties': {
'login': {'type': 'string'},
'password': {'type': 'string'},
'list': {'type': 'string'},
'force_language': {'type': 'string', 'default': 'en-us'}
},
'additionalProperties': False,
'required': ['login', 'password', 'list']
}
def __init__(self, config):
self.config = config
self._session = Session()
self._session.add_domain_limiter(TimedLimiter('imdb.com', '5 seconds'))
self._session.headers = {'Accept-Language': config.get('force_language', 'en-us')}
self.user_id = None
self.list_id = None
self._items = None
self._authenticated = False
@property
def session(self):
if not self._authenticated:
self.authenticate()
return self._session
def authenticate(self):
"""Authenticates a session with imdb, and grabs any IDs needed for getting/modifying list."""
try:
r = self._session.get(
'https://www.imdb.com/ap/signin?openid.return_to=https%3A%2F%2Fwww.imdb.com%2Fap-signin-'
'handler&openid.identity=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&'
'openid.assoc_handle=imdb_mobile_us&openid.mode=checkid_setup&openid.claimed_id=http%3A%'
'2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&openid.ns=http%3A%2F%2Fspecs.ope'
'nid.net%2Fauth%2F2.0')
except ConnectionError as e:
raise PluginError(e.args[0])
soup = get_soup(r.content)
inputs = soup.select('form#ap_signin_form input')
data = dict((i['name'], i.get('value')) for i in inputs if i.get('name'))
data['email'] = self.config['login']
data['password'] = self.config['password']
d =
<FILEB>
<CHANGES>
if Backend.getIdentifiedDbms() in (DBMS.MYSQL, DBMS.HSQL):
<CHANGEE>
<FILEE>
<FILEB>
"""PostgreSQL input: SELECT usename, passwd FROM pg_shadow"""
"""PostgreSQL output: 'HsYIBS'||COALESCE(CAST(usename AS CHARACTER(10000)), ' ')||'KTBfZp'||COALESCE(CAST(passwd AS CHARACTER(10000)), ' ')||'LkhmuP' FROM pg_shadow"""
"""Oracle input: SELECT COLUMN_NAME, DATA_TYPE FROM SYS.ALL_TAB_COLUMNS WHERE TABLE_NAME='USERS'"""
"""Oracle output: 'GdBRAo'||NVL(CAST(COLUMN_NAME AS VARCHAR(4000)), ' ')||'czEHOf'||NVL(CAST(DATA_TYPE AS VARCHAR(4000)), ' ')||'JVlYgS' FROM SYS.ALL_TAB_COLUMNS WHERE TABLE_NAME='USERS'"""
"""Microsoft SQL Server input: SELECT name, master.dbo.fn_varbintohexstr(password) FROM master..sysxlogins"""
"""Microsoft SQL Server output: 'QQMQJO'+ISNULL(CAST(name AS VARCHAR(8000)), ' ')+'kAtlqH'+ISNULL(CAST(master.dbo.fn_varbintohexstr(password) AS VARCHAR(8000)), ' ')+'lpEqoi' FROM master..sysxlogins"""
"""@param query: query string to be processed"""
"""@type query: C{str}"""
"""@return: query string nulled, casted and concatenated"""
"""@rtype: C{str}"""
if unpack:
concatenatedQuery = ""
query = query.replace(", ", ',')
fieldsSelectFrom, fieldsSelect, fieldsNoSelect, fieldsSelectTop, fieldsSelectCase, _, fieldsToCastStr, fieldsExists = self.getFields(query)
castedFields = self.nullCastConcatFields(fieldsToCastStr)
concatenatedQuery = query.replace(fieldsToCastStr, castedFields, 1)
else:
return query
<CHANGES>
if Backend.isDbms(DBMS.MYSQL):
<CHANGEE>
if fieldsExists:
concatenatedQuery = concatenatedQuery.replace("SELECT ", "CONCAT('%s'," % kb.chars.start, 1)
concatenatedQuery += ",'%s')" % kb.chars.stop
<FILEE>
<SCANS> None
suffix = kb.injection.suffix if kb.injection and suffix is None else suffix
if kb.technique and kb.technique in kb.injection.data:
where = kb.injection.data[kb.technique].where if where is None else where
comment = kb.injection.data[kb.technique].comment if comment is None else comment
if Backend.getIdentifiedDbms() == DBMS.ACCESS and comment == GENERIC_SQL_COMMENT:
comment = "%00"
if comment is not None:
expression += comment
# If we are replacing (<where>) the parameter original value with
# our payload do not append the suffix
if where == PAYLOAD.WHERE.REPLACE:
pass
elif suffix and not comment:
expression += " %s" % suffix
return re.sub(r"(?s);\W*;", ";", expression)
def cleanupPayload(self, payload, origValue=None):
if payload is None:
return
_ = (
("[DELIMITER_START]", kb.chars.start), ("[DELIMITER_STOP]", kb.chars.stop),\
("[AT_REPLACE]", kb.chars.at), ("[SPACE_REPLACE]", kb.chars.space), ("[DOLLAR_REPLACE]", kb.chars.dollar),\
("[HASH_REPLACE]", kb.chars.hash_),
)
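# Substitute each placeholder token with its runtime character, applying the pairs in order.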
payload = reduce(lambda x, y: x.replace(y[0], y[1]), _, payload)
for _ in set(re.findall(r"\[RANDNUM(?:\d+)?\]", payload, re.I)):
payload = payload.replace(_, str(randomInt()))
for _ in set(re.findall(r"\[RANDSTR(?:\d+)?\]", payload, re.I)):
payload = payload.replace(_, randomStr())
if origValue is not None:
payload = payload.replace("[ORIGVALUE]", origValue if origValue.isdigit() else "'%s'" % origValue)
if "[INFERENCE]" in payload:
if Backend.getIdentifiedDbms() is not None:
inference = queries[Backend.getIdentifiedDbms()].inference
if "dbms_version" in inference:
if isDBMSVersionAtLeast(inference.dbms_version):
inferenceQuery =
<FILEB>
<CHANGES>
if (version or '').startswith(python):
<CHANGEE>
<FILEE>
<FILEB>
possibilities.extend(
[
'python{0}{1}'.format(python[0], python[2]),
'python{0}.{1}'.format(python[0], python[2]),
'python{0}.{1}m'.format(python[0], python[2])
]
)
# Reverse the list, so we find specific ones first.
possibilities = reversed(possibilities)
for possibility in possibilities:
# Windows compatibility.
if os.name == 'nt':
possibility = '{0}.exe'.format(possibility)
versions = []
pythons = system_which(possibility, mult=True)
for p in pythons:
versions.append(python_version(p))
for i, version in enumerate(versions):
<CHANGES>
if python in (version or ''):
<CHANGEE>
return pythons[i]
def ensure_python(three=None, python=None):
def abort():
click.echo(
'You can specify specific versions of Python with:\n {0}'.format(
crayons.red('$ pipenv --python {0}'.format(os.sep.join(('path', 'to', 'python'))))
), err=True
)
sys.exit(1)
def activate_pyenv():
"""Adds all pyenv installations to the PATH."""
if PYENV_INSTALLED:
<FILEE>
<SCANS>zip'])
c = delegator.run('"{0}" {1} check --json'.format(which('python'), shellquote(path)))
try:
results = simplejson.loads(c.out)
except ValueError:
click.echo('An error occured:', err=True)
click.echo(c.err, err=True)
sys.exit(1)
for (package, resolved, installed, description, vuln) in results:
click.echo(
'{0}: {1} {2} resolved ({3} installed)!'.format(
crayons.white(vuln, bold=True),
crayons.green(package),
crayons.red(resolved, bold=False),
crayons.red(installed, bold=True)
)
)
click.echo('{0}'.format(description))
click.echo()
if not results:
click.echo(crayons.green('All good!'))
else:
sys.exit(1)
@click.command(help=u"Displays currently–installed dependency graph information.")
@click.option('--bare', is_flag=True, default=False, help="Minimal output.")
@click.option('--json', is_flag=True, default=False, help="Output JSON.")
def graph(bare=False, json=False):
try:
python_path = which('python')
except AttributeError:
click.echo(
u'{0}: {1}'.format(
crayons.red('Warning', bold=True),
u'Unable to display currently–installed dependency graph information here. '
u'Please run within a Pipenv project.',
), err=True
)
sys.exit(1)
j = '--json' if json else ''
cmd = '"{0}" {1} {2}'.format(
python_path,
shellquote(pipdeptree.__file__.rstrip('cdo')),
j
)
# Run dep-tree.
c = delegator.run(cmd)
if not bare:
if json:
data = []
for d in simplejson.loads(c.out):
if d['package']['key'] not in BAD_PACKAGES:
data.append(d)
click.echo(simplejson.dumps(data, indent=4))
sys.exit(0)
else:
for line in c.out.split('\n'):
# Ignore bad packages.
if line.split('==')[0] in BAD_PACKAGES:
continue
# Bold top-level packages.
if not line.startswith(' '):
click.echo(crayons.white(line, bold=True))
# Echo the rest.
else:
click.echo(crayons.white(line, bold=False))
else:
click.echo(c.out)
# Return its return code.
sys.exit(c.return_code)
@click.command(help="Uninstalls all packages, and re-installs package(s) in [packages] to latest compatible versions.")
@click.option('--verbose', '-v', is_flag=True, default=False, help="Verbose mode.")
@click.
<FILEB>
<CHANGES>
pkgs += "mosh python-scipy python-numpy default-jdk mpich2"
<CHANGEE>
<FILEE>
<FILEB>
mysqlpreseed = open(preseedf, 'w')
preseeds = """\"""
"""mysql-server mysql-server/root_password select"""
"""mysql-server mysql-server/root_password seen true"""
"""mysql-server mysql-server/root_password_again select"""
"""mysql-server mysql-server/root_password_again seen true"""
mysqlpreseed.write(preseeds)
mysqlpreseed.close()
run_command('debconf-set-selections < %s' % mysqlpreseed.name)
run_command('rm %s' % mysqlpreseed.name)
pkgs = "python-dev git vim mercurial subversion cvs encfs "
pkgs += "openmpi-bin libopenmpi-dev python-django "
pkgs += "keychain screen tmux zsh ksh csh tcsh python-mpi4py "
pkgs += "python-virtualenv python-imaging python-boto python-matplotlib "
pkgs += "unzip rar unace build-essential gfortran ec2-api-tools "
pkgs += "ec2-ami-tools mysql-server mysql-client apache2 "
pkgs += "libapache2-mod-wsgi sysv-rc-conf pssh emacs cython irssi htop "
pkgs += "python-distutils-extra vim-scripts python-ctypes python-pudb "
<CHANGES>
pkgs += "mosh python-scipy python-numpy default-jdk"
<CHANGEE>
apt_install(pkgs)
def configure_init():
for script in ['nfs-kernel-server', 'hadoop', 'condor', 'apache', 'mysql']:
run_command('find /etc/rc* -iname \*%s\* -delete' % script)
def cleanup():
run_command('rm /etc/resolv.conf')
run_command('rm /etc/mtab')
run_command('rm -rf /root/*')
exclude = ['/root/.bashrc', '/root/.profile', '/root/.bash_aliases']
for dot in glob.glob("/root/.*"):
if dot not in exclude:
run_command('rm -rf %s' % dot)
<FILEE>
<SCANS> False
return True
run_command('dpkg-buildpackage -rfakeroot -b',
failure_callback=_deb_failure_callback)
run_command('dpkg -i ../*scipy*.deb')
def install_openmpi():
chdir(SRC_DIR)
apt_command('build-dep libopenmpi-dev')
apt_install('blcr-util')
if glob.glob('*openmpi*.deb'):
run_command('dpkg -i *openmpi*.deb')
return
apt_command('source libopenmpi-dev')
chdir('openmpi*')
for line in fileinput.input('debian/rules', inplace=1):
print line,
if '--enable-heterogeneous' in line:
print ' --with-sge \\'
def _deb_failure_callback(retval):
if not glob.glob('../*openmpi*.deb'):
return False
return True
run_command('dpkg-buildpackage -rfakeroot -b',
failure_callback=_deb_failure_callback)
run_command('dpkg -i ../*openmpi*.deb')
sts, out = run_command('ompi_info | grep -i grid', get_output=True)
if 'gridengine' not in out:
raise Exception("failed to build openmpi with Grid Engine support")
def install_hadoop():
chdir(SRC_DIR)
hadoop_pkgs = ['namenode', 'datanode', 'tasktracker', 'jobtracker',
'secondarynamenode']
pkgs = ['hadoop-0.20'] + ['hadoop-0.20-%s' % pkg for pkg in hadoop_pkgs]
apt_install(' '.join(pkgs))
run_command('easy_install dumbo')
def install_ipython():
chdir(SRC_DIR)
apt_install('libzmq-dev')
run_command('pip install ipython tornado pygments pyzmq')
mjax_install = 'from IPython.external.mathjax import install_mathjax'
mjax_install += '; install_mathjax()'
run_command("python -c '%s'" % mjax_install)
def configure_motd():
for f in glob.glob('/etc/update-motd.d/*'):
os.unlink(f)
motd = open('/etc/update-motd.d
<FILEB>
<CHANGES>
formatted_time = str(datetime.timedelta(seconds=int(estimated_time_remaining)))
<CHANGEE>
<FILEE>
<FILEB>
# No validation set, so just assume it's the best so far.
is_best_so_far = True
val_metrics = {}
best_epoch_val_metrics = {}
this_epoch_val_metric = None
self._save_checkpoint(epoch, validation_metric_per_epoch, is_best=is_best_so_far)
self._metrics_to_tensorboard(epoch, train_metrics, val_metrics=val_metrics)
self._metrics_to_console(train_metrics, val_metrics)
if self._learning_rate_scheduler:
# The LRScheduler API is agnostic to whether your schedule requires a validation metric -
# if it doesn't, the validation metric passed here is ignored.
self._learning_rate_scheduler.step(this_epoch_val_metric, epoch)
epoch_elapsed_time = time.time() - epoch_start_time
logger.info("Epoch duration: %s", time.strftime("%H:%M:%S", time.gmtime(epoch_elapsed_time)))
if epoch < self._num_epochs - 1:
training_elapsed_time = time.time() - training_start_time
estimated_time_remaining = training_elapsed_time * \
((self._num_epochs - epoch_counter) / float(epoch - epoch_counter + 1) - 1)
<CHANGES>
formatted_time = time.strftime("%H:%M:%S", time.gmtime(estimated_time_remaining))
<CHANGEE>
logger.info("Estimated training time remaining: %s", formatted_time)
epochs_trained += 1
training_elapsed_time = time.time() - training_start_time
metrics = {
"training_duration": time.strftime("%H:%M:%S", time.gmtime(training_elapsed_time)),
"training_start_epoch": epoch_counter,
"training_epochs": epochs_trained
}
for key, value in train_metrics.items():
metrics["training_" + key] = value
for key, value in val_metrics.items():
metrics["validation_" + key] = value
<FILEE>
<SCANS>model_every_num_seconds = keep_serialized_model_every_num_seconds
self._serialized_paths: List[Any] = []
self._last_permanent_saved_checkpoint_time = time.time()
self._model_save_interval = model_save_interval
self._grad_norm = grad_norm
self._grad_clipping = grad_clipping
self._learning_rate_scheduler = learning_rate_scheduler
increase_or_decrease = validation_metric[0]
if increase_or_decrease not in ["+", "-"]:
raise ConfigurationError("Validation metrics must specify whether they should increase "
"or decrease by pre-pending the metric name with a +/-.")
self._validation_metric = validation_metric[1:]
self._validation_metric_decreases = increase_or_decrease == "-"
if not isinstance(cuda_device, int) and not isinstance(cuda_device, list):
raise ConfigurationError("Expected an int or list for cuda_device, got {}".format(cuda_device))
if isinstance(cuda_device, list):
logger.info(f"WARNING: Multiple GPU support is experimental not recommended for use. "
"In some cases it may lead to incorrect results or undefined behavior.")
self._multiple_gpu = True
self._cuda_devices = cuda_device
# data_parallel will take care of transfering to cuda devices,
# so the iterator keeps data on CPU.
self._iterator_device = -1
else:
self._multiple_gpu = False
self._cuda_devices = [cuda_device]
self._iterator_device = cuda_device
if self._cuda_devices[0] != -1:
self._model = self._model.cuda(self._cuda_devices[0])
self._log_interval = 10 # seconds
self._summary_interval = summary_interval
self._histogram_interval = histogram_interval
self._log_histograms_this_batch = False
# We keep the total batch number as a class variable because it
# is used inside a closure for the hook which logs activations in
# ``_enable_activation_logging``.
self._batch_num_total = 0
self._last_log = 0.0 # time of last logging
if serialization_dir is not None:
train_log = SummaryWriter(os.path.join(serialization_dir, "log", "train"))
<FILEB>
<CHANGES>
print(thes.scored_synonyms(word1))
<CHANGEE>
<FILEE>
<FILEB>
else:
return [(fileid, self._thesaurus[fileid][ngram].keys()) for fileid in self._fileids]
def __contains__(self, ngram):
'''Determines whether or not the given ngram is in the thesaurus.'''
''':param ngram: ngram to lookup'''
''':type ngram: C{string}'''
''':return: whether the given ngram is in the thesaurus.'''
return reduce(lambda accum, fileid: accum or (ngram in self._thesaurus[fileid]), self._fileids, False)
######################################################################
# Demo
######################################################################
def demo():
from nltk.corpus import lin_thesaurus as thes
word1 = "business"
word2 = "enterprise"
print("Getting synonyms for " + word1)
print(thes.synonyms(word1))
print("Getting scored synonyms for " + word1)
<CHANGES>
print(thes.synonyms(word1))
<CHANGEE>
print("Getting synonyms from simN.lsp (noun subsection) for " + word1)
print(thes.synonyms(word1, fileid="simN.lsp"))
print("Getting synonyms from simN.lsp (noun subsection) for " + word1)
print(thes.synonyms(word1, fileid="simN.lsp"))
print("Similarity score for %s and %s:" % (word1, word2))
print(thes.similarity(word1, word2))
if __name__ == '__main__':
demo()
<FILEE>
<SCANS>'''
''':return: If fileid is specified, just the score for the two ngrams; otherwise,'''
'''list of tuples of fileids and scores.'''
# Entries don't contain themselves, so make sure similarity between item and itself is 1.0
if ngram1 == ngram2:
if fileid:
return 1.0
else:
return [(fid, 1.0) for fid in self._fileids]
else:
if fileid:
return self._thesaurus[fileid][ngram1][ngram2] if ngram2 in self._thesaurus[fileid][ngram1] else self._badscore
else:
return [(fid, (self._thesaurus[fid][ngram1][ngram2] if ngram2 in self._thesaurus[fid][ngram1]
else self._badscore)) for fid in self._fileids]
def scored_synonyms(self, ngram, fileid=None):
'''Returns a list of scored synonyms (tuples of synonyms and scores) for the current ngram'''
''':param ngram: ngram to lookup'''
''':type ngram: C{string}'''
''':param fileid: thesaurus fileid to search in. If None, search all fileids.'''
''':type fileid: C{string}'''
''':return: If fileid is specified, list of tuples of scores and synonyms; otherwise,'''
'''list of tuples of fileids and lists, where inner lists consist of tuples of'''
'''scores and synonyms.'''
if fileid:
return self._thesaurus[fileid][ngram].items()
else:
return [(fileid, self._thesaurus[fileid][ngram].items()) for fileid in self._fileids]
def synonyms(self, ngram, fileid=None):
'''Returns a list of synonyms for the current ngram.'''
''':param ngram: ngram to lookup'''
''':type ngram: C{string}'''
''':param fileid: thesaurus fileid to search in. If None, search all fileids.'''
''':type fileid: C{string}'''
''':return: If fileid is specified, list of synonyms; otherwise, list of tuples of fileids and'''
'''lists, where inner lists contain synonyms.'''
if fileid:
return self._thesaurus[fileid][ngram].keys()
<FILEB>
<CHANGES>
_log.experimental("Opening new pull request for: %s", ', '.join(paths))
<CHANGEE>
<FILEE>
<FILEB>
remote_name = 'github_%s_%s' % (github_user, salt)
dry_run = build_option('dry_run') or build_option('extended_dry_run')
if not dry_run:
my_remote = git_repo.create_remote(remote_name, github_url)
res = my_remote.push(pr_branch)
if res:
if res[0].ERROR & res[0].flags:
raise EasyBuildError("Pushing branch '%s' to remote %s (%s) failed: %s",
pr_branch, my_remote, github_url, res[0].summary)
else:
_log.debug("Pushed branch %s to remote %s (%s): %s", pr_branch, my_remote, github_url, res[0].summary)
else:
raise EasyBuildError("Pushing branch '%s' to remote %s (%s) failed: empty result",
pr_branch, my_remote, github_url)
return file_info, git_repo, pr_branch, diff_stat
@only_if_module_is_available('git', pkgname='GitPython')
def new_pr(paths, title=None, descr=None, commit_msg=None):
"""Open new pull request using specified files."""
<CHANGES>
_log.experimental("Opening new pull request for with %s", paths)
<CHANGEE>
pr_branch_name = build_option('pr_branch_name')
pr_target_account = build_option('pr_target_account')
pr_target_repo = build_option('pr_target_repo')
# collect GitHub info we'll need
# * GitHub username to push branch to repo
# * GitHub token to open PR
github_user = build_option('github_user')
if github_user is None:
raise EasyBuildError("GitHub user must be specified to use --new-pr")
github_token = fetch_github_token(github_user)
if github_token is None:
raise EasyBuildError("GitHub token for user '%s' must be available to use --new-pr", github_user)
<FILEE>
<SCANS> get data for PR #%d from %s/%s (status: %d %s)",
pr, GITHUB_EB_MAIN, GITHUB_EASYCONFIGS_REPO, status, pr_data)
# 'clean' on successful (or missing) test, 'unstable' on failed tests
stable = pr_data['mergeable_state'] == GITHUB_MERGEABLE_STATE_CLEAN
if not stable:
_log.warning("Mergeable state for PR #%d is not '%s': %s.",
pr, GITHUB_MERGEABLE_STATE_CLEAN, pr_data['mergeable_state'])
for key, val in sorted(pr_data.items()):
_log.debug("\n%s:\n\n%s\n" % (key, val))
# determine list of changed files via diff
diff_fn = os.path.basename(pr_data['diff_url'])
diff_filepath = os.path.join(path, diff_fn)
download_file(diff_fn, pr_data['diff_url'], diff_filepath, forced=True)
diff_txt = read_file(diff_filepath)
os.remove(diff_filepath)
patched_files = det_patched_files(txt=diff_txt, omit_ab_prefix=True, github=True)
_log.debug("List of patched files: %s" % patched_files)
# obtain last commit
# get all commits, increase to (max of) 100 per page
if pr_data['commits'] > GITHUB_MAX_PER_PAGE:
raise EasyBuildError("PR #%s contains more than %s commits, can't obtain last commit", pr, GITHUB_MAX_PER_PAGE)
status, commits_data = github_api_get_request(lambda g: pr_url(g).commits, github_user,
per_page=GITHUB_MAX_PER_PAGE)
last_commit = commits_data[-1]
_log.debug("Commits: %s, last commit: %s" % (commits_data, last_commit['sha']))
# obtain most recent version of patched files
for patched_file in patched_files:
fn = os.path.basename(patched_file)
sha = last_commit['sha']
full_url = URL_SEPARATOR.join([GITHUB_RAW, GITHUB_EB_MAIN,
<FILEB>
<CHANGES>
raise RESTNoExecuteException("%s cannot be executed!" % self.name)
<CHANGEE>
<FILEE>
<FILEB>
}
class Element(object):
"""Abstract Element Class to be inherited by all Element elements"""
def __init__(self):
self.name = str(self.__class__.__name__)
self.nestables = None
self.attributes = {}
self.text = ''
self.children = []
def parse_element(self, element, uri=None):
self.prepare_attributes(element)
self.prepare_text(element)
def run(self, outbound_socket):
outbound_socket.log.info("[%s] %s %s" \
% (self.name, self.text, self.attributes))
execute = getattr(self, 'execute')
if not execute:
outbound_socket.log.error("[%s] cannot be executed !" % self.name)
<CHANGES>
raise RESTExecuteException("%s cannot be executed !" % self.name)
<CHANGEE>
result = execute(outbound_socket)
if not result:
outbound_socket.log.info("[%s] Done" % self.name)
else:
outbound_socket.log.info("[%s] Done -- Result %s" % (self.name, result))
def extract_attribute_value(self, item, default=None):
try:
item = self.attributes[item]
except KeyError:
item = default
return item
def prepare_attributes(self, element):
<FILEE>
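To make the getattr-based dispatch in Element.run() concrete, a hypothetical subclass (not taken from the original module) only needs to define execute(); the logging and attribute handling come from the base class shown above:

class ExampleNoop(Element):
    """Hypothetical element used purely to illustrate the run() contract."""

    def __init__(self):
        Element.__init__(self)

    def execute(self, outbound_socket):
        # a real element would issue FreeSWITCH commands here; logging keeps
        # the sketch independent of the outbound socket API
        outbound_socket.log.info("[%s] nothing to do" % self.name)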
<SCANS># -*- coding: utf-8 -*-
# Copyright (c) 2011 Plivo Team. See LICENSE for details.
import os.path
from datetime import datetime
import re
import uuid
from plivo.rest.freeswitch.helpers import is_valid_url, url_exists, \
file_exists
from plivo.rest.freeswitch.exceptions import RESTFormatException, \
RESTAttributeException, \
RESTRedirectException, \
RESTNoExecuteException
RECOGNIZED_SOUND_FORMATS = ['audio/mpeg', 'audio/wav', 'audio/x-wav']
ELEMENTS_DEFAULT_PARAMS = {
'Conference': {
#'room': SET IN ELEMENT BODY
'waitSound': '',
'muted': 'false',
'startConferenceOnEnter': 'true',
'endConferenceOnExit': 'false',
'maxMembers': 200,
'enterSound': '',
'exitSound': '',
'timeLimit': 0 ,
'hangupOnStar': 'false'
},
'Dial': {
#action: DYNAMIC! MUST BE SET IN METHOD,
'method': 'POST',
'hangupOnStar': 'false',
#callerId: DYNAMIC! MUST BE SET IN METHOD,
'timeLimit': 0,
'confirmSound': '',
'confirmKey': '',
'dialMusic': ''
},
'GetDigits': {
#action: DYNAMIC! MUST BE SET IN METHOD,
'method': 'POST',
'timeout': 5,
'finishOnKey': '#',
'numDigits': 99,
'retries': 1,
'playBeep': 'false',
'validDigits': '0123456789*#',
'invalidDigitsSound': ''
},
'Hangup': {
'reason': '',
'schedule': 0
},
'Number': {
#'gateways': DYNAMIC! MUST BE SET IN METHOD,
#'gatewayCodecs': DYNAMIC! MUST BE SET IN METHOD,
#'gatewayTimeouts': DYNAMIC! MUST BE SET IN METHOD,
#'gatewayRetries': DYNAMIC! MUST BE SET IN METHOD,
#'extraDialString': DYNAMIC! MUST BE SET IN METHOD,
'sendDigits': '',
},
'Wait': {
'length': 1
},
'Play': {
#url: SET IN ELEMENT BODY
'loop': 1
},
'Preanswer': {
},
'Record': {
'timeout': 15,
'finishOnKey': '1234567890*#',
'maxLength': 60,
'playBeep': 'true',
'filePath': '/usr/local/freeswitch/recordings/',
'format': 'mp3',
'prefix': '',
'bothLegs': 'false'
},
'Redirect': {
'method': 'POST'
},
'Speak': {
'voice': 'slt',
'language': 'en',
'loop': 1,
'engine': 'flite',
'method': '',
'type
<FILEB>
<CHANGES>
if retVal.upper() in kb.keywords or not re.match(r"\A[A-Za-z0-9_@%s\$]+\Z" % ("." if _ else ""), retVal): # MsSQL is the only DBMS where we automatically prepend schema to table name (dot is normal)
<CHANGEE>
<FILEE>
<FILEB>
kb.reflectiveMechanism = False
if not suppressWarning:
debugMsg = "turning off reflection removal mechanism (for optimization purposes)"
logger.debug(debugMsg)
return retVal
def normalizeUnicode(value):
"""Does an ASCII normalization of unicode strings"""
"""Reference: http://www.peterbe.com/plog/unicode-to-ascii"""
return unicodedata.normalize('NFKD', value).encode('ascii', 'ignore') if isinstance(value, unicode) else value
def safeSQLIdentificatorNaming(name, isTable=False):
"""Returns a safe representation of SQL identificator name (internal data format)"""
"""Reference: http://stackoverflow.com/questions/954884/what-special-characters-are-allowed-in-t-sql-column-retVal"""
retVal = name
if isinstance(name, basestring):
retVal = getUnicode(name)
_ = isTable and Backend.getIdentifiedDbms() in (DBMS.MSSQL, DBMS.SYBASE)
if _:
retVal = re.sub(r"(?i)\A%s\." % DEFAULT_MSSQL_SCHEMA, "", retVal)
<CHANGES>
if not re.match(r"\A[A-Za-z0-9_@%s\$]+\Z" % ("." if _ else ""), retVal): # MsSQL is the only DBMS where we automatically prepend schema to table name (dot is normal)
<CHANGEE>
if Backend.getIdentifiedDbms() in (DBMS.MYSQL, DBMS.ACCESS):
retVal = "`%s`" % retVal.strip("`")
elif Backend.getIdentifiedDbms() in (DBMS.ORACLE, DBMS.PGSQL, DBMS.DB2):
retVal = "\"%s\"" % retVal.strip("\"")
elif Backend.getIdentifiedDbms() in (DBMS.MSSQL,):
retVal = "[%s]" % retVal.strip("[]")
if _ and DEFAULT_MSSQL_SCHEMA not in retVal and '.' not in re.sub(r"\[[^]]+\]", "", retVal):
retVal = "%s.%s" % (DEFAULT_MSSQL_SCHEMA, retVal)
return retVal
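# Illustrative note (not part of the original module): rough expected results of
# safeSQLIdentificatorNaming("user data", isTable=True), assuming the listed
# back-end has been identified (the space forces the quoting branch above):
#   MySQL / Access       -> `user data`
#   Oracle / PostgreSQL  -> "user data"
#   MSSQL                -> dbo.[user data]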
def unsafeSQLIdentificatorNaming(name):
"""Extracts identificator<SCANS>Limit = average(kb.responseTimes) + TIME_STDEV_COEFF * deviation
retVal = (threadData.lastQueryDuration >= max(MIN_VALID_DELAYED_RESPONSE, lowerStdLimit))
if not kb.testMode and retVal:
if kb.adjustTimeDelay is None:
msg = "do you want sqlmap to try to optimize value(s) "
msg += "for DBMS delay responses (option '--time-sec')? [Y/n] "
choice = readInput(msg, default='Y')
kb.adjustTimeDelay = ADJUST_TIME_DELAY.DISABLE if choice.upper() == 'N' else ADJUST_TIME_DELAY.YES
if kb.adjustTimeDelay is ADJUST_TIME_DELAY.YES:
adjustTimeDelay(threadData.lastQueryDuration, lowerStdLimit)
return retVal
else:
return (threadData.lastQueryDuration - conf.timeSec) >= 0
def adjustTimeDelay(lastQueryDuration, lowerStdLimit):
"""Provides tip for adjusting time delay in time-based data retrieval"""
candidate = 1 + int(round(lowerStdLimit))
if candidate:
kb.delayCandidates = [candidate] + kb.delayCandidates[:-1]
if all((x == candidate for x in kb.delayCandidates)) and candidate < conf.timeSec:
conf.timeSec = candidate
infoMsg = "adjusting time delay to "
infoMsg += "%d second%s due to good response times" % (conf.timeSec, 's' if conf.timeSec > 1 else '')
logger.info(infoMsg)
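# Worked example (illustrative values only): with lowerStdLimit == 0.3 the
# candidate delay is 1 + int(round(0.3)) == 1; once the recent measurements in
# kb.delayCandidates all agree on 1 and the configured --time-sec is larger,
# conf.timeSec is lowered to 1 second.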
def getLastRequestHTTPError():
"""Returns last HTTP error code"""
threadData = getCurrentThreadData()
return threadData.lastHTTPError[1] if threadData.lastHTTPError else None
def extractErrorMessage(page):
"""Returns reported error message from page if it founds one"""
""">>> extractErrorMessage(u'<html><title>Test</title>\\n<b>Warning</b>: oci_parse() [function.oci-parse]: ORA-01756: quoted string not properly terminated<br><p>Only a test page</p></html>')"""
"""u'oci_parse() [function.oci-parse]: ORA-01756: